Reorganize architecture-dependent header files
author     Antonio Nino Diaz <[email protected]>
           Mon, 17 Dec 2018 17:20:57 +0000 (17:20 +0000)
committer  Antonio Nino Diaz <[email protected]>
           Fri, 4 Jan 2019 10:43:16 +0000 (10:43 +0000)
The architecture-dependent header files in include/lib/${ARCH} and
include/common/${ARCH} have been moved to include/arch/${ARCH}.

Change-Id: I96f30fdb80b191a51448ddf11b1d4a0624c03394
Signed-off-by: Antonio Nino Diaz <[email protected]>
33 files changed:
Makefile
include/arch/aarch32/arch.h [new file with mode: 0644]
include/arch/aarch32/arch_helpers.h [new file with mode: 0644]
include/arch/aarch32/asm_macros.S [new file with mode: 0644]
include/arch/aarch32/assert_macros.S [new file with mode: 0644]
include/arch/aarch32/console_macros.S [new file with mode: 0644]
include/arch/aarch32/el3_common_macros.S [new file with mode: 0644]
include/arch/aarch32/smccc_helpers.h [new file with mode: 0644]
include/arch/aarch32/smccc_macros.S [new file with mode: 0644]
include/arch/aarch64/arch.h [new file with mode: 0644]
include/arch/aarch64/arch_helpers.h [new file with mode: 0644]
include/arch/aarch64/asm_macros.S [new file with mode: 0644]
include/arch/aarch64/assert_macros.S [new file with mode: 0644]
include/arch/aarch64/console_macros.S [new file with mode: 0644]
include/arch/aarch64/el3_common_macros.S [new file with mode: 0644]
include/arch/aarch64/setjmp.h [new file with mode: 0644]
include/arch/aarch64/smccc_helpers.h [new file with mode: 0644]
include/common/aarch32/asm_macros.S [deleted file]
include/common/aarch32/assert_macros.S [deleted file]
include/common/aarch32/console_macros.S [deleted file]
include/common/aarch32/el3_common_macros.S [deleted file]
include/common/aarch64/asm_macros.S [deleted file]
include/common/aarch64/assert_macros.S [deleted file]
include/common/aarch64/console_macros.S [deleted file]
include/common/aarch64/el3_common_macros.S [deleted file]
include/lib/aarch32/arch.h [deleted file]
include/lib/aarch32/arch_helpers.h [deleted file]
include/lib/aarch32/smccc_helpers.h [deleted file]
include/lib/aarch32/smccc_macros.S [deleted file]
include/lib/aarch64/arch.h [deleted file]
include/lib/aarch64/arch_helpers.h [deleted file]
include/lib/aarch64/setjmp.h [deleted file]
include/lib/aarch64/smccc_helpers.h [deleted file]

index 1b48fe1ebc8dc68775f5756add1762504ee8f927..6b5b0803ef37474d52a19339561240cbbcf39ad3 100644
--- a/Makefile
+++ b/Makefile
@@ -270,19 +270,18 @@ BL_COMMON_SOURCES +=      lib/${ARCH}/armclang_printf.S
 endif
 
 INCLUDES               +=      -Iinclude                               \
+                               -Iinclude/arch/${ARCH}                  \
                                -Iinclude/bl1                           \
                                -Iinclude/bl2                           \
                                -Iinclude/bl2u                          \
                                -Iinclude/bl31                          \
                                -Iinclude/common                        \
-                               -Iinclude/common/${ARCH}                \
                                -Iinclude/drivers                       \
                                -Iinclude/drivers/arm                   \
                                -Iinclude/drivers/auth                  \
                                -Iinclude/drivers/io                    \
                                -Iinclude/drivers/ti/uart               \
                                -Iinclude/lib                           \
-                               -Iinclude/lib/${ARCH}                   \
                                -Iinclude/lib/cpus                      \
                                -Iinclude/lib/cpus/${ARCH}              \
                                -Iinclude/lib/el3_runtime               \
diff --git a/include/arch/aarch32/arch.h b/include/arch/aarch32/arch.h
new file mode 100644
index 0000000..8260c54
--- /dev/null
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_H
+#define ARCH_H
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK         U(0xff)
+#define MIDR_IMPL_SHIFT                U(24)
+#define MIDR_VAR_SHIFT         U(20)
+#define MIDR_VAR_BITS          U(4)
+#define MIDR_REV_SHIFT         U(0)
+#define MIDR_REV_BITS          U(4)
+#define MIDR_PN_MASK           U(0xfff)
+#define MIDR_PN_SHIFT          U(4)
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_MT_MASK          (U(1) << 24)
+#define MPIDR_CPU_MASK         MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK     (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS    U(8)
+#define MPIDR_AFFLVL_MASK      U(0xff)
+#define MPIDR_AFFLVL_SHIFT     U(3)
+#define MPIDR_AFF0_SHIFT       U(0)
+#define MPIDR_AFF1_SHIFT       U(8)
+#define MPIDR_AFF2_SHIFT       U(16)
+#define MPIDR_AFF_SHIFT(_n)    MPIDR_AFF##_n##_SHIFT
+#define MPIDR_AFFINITY_MASK    U(0x00ffffff)
+#define MPIDR_AFFLVL0          U(0)
+#define MPIDR_AFFLVL1          U(1)
+#define MPIDR_AFFLVL2          U(2)
+#define MPIDR_AFFLVL(_n)       MPIDR_AFFLVL##_n
+
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+               (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+               (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+               (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL3_VAL(mpidr)       U(0)
+
+#define MPIDR_AFF_ID(mpid, n)                                  \
+       (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
+
+#define MPID_MASK              (MPIDR_MT_MASK                          |\
+                                (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT)|\
+                                (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT)|\
+                                (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
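
As a usage note, not part of the patch: these accessors decompose an MPIDR value in the usual way. A minimal sketch, assuming the read_mpidr() helper defined further down in arch_helpers.h; the local variable names are illustrative only:

    u_register_t mpidr = read_mpidr();
    unsigned int core    = MPIDR_AFFLVL0_VAL(mpidr); /* CPU within cluster */
    unsigned int cluster = MPIDR_AFFLVL1_VAL(mpidr); /* cluster number */
    unsigned int aff1    = MPIDR_AFF_ID(mpidr, 1);   /* same value as 'cluster' */
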
+
+/*
+ * An invalid MPID. This value can be used by functions that return an MPID to
+ * indicate an error.
+ */
+#define INVALID_MPID           U(0xFFFFFFFF)
+
+/*
+ * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
+ * add one while using this macro to define array sizes.
+ */
+#define MPIDR_MAX_AFFLVL       U(2)
+
+/* Data Cache set/way op type defines */
+#define DC_OP_ISW                      U(0x0)
+#define DC_OP_CISW                     U(0x1)
+#define DC_OP_CSW                      U(0x2)
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF                      U(0x000)
+#define CNTFID_OFF                     U(0x020)
+
+#define CNTCR_EN                       (U(1) << 0)
+#define CNTCR_HDBG                     (U(1) << 1)
+#define CNTCR_FCREQ(x)                 ((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT            U(21)
+#define LOC_SHIFT              U(24)
+#define CLIDR_FIELD_WIDTH      U(3)
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT            U(1)
+
+/* ID_PFR0 AMU definitions */
+#define ID_PFR0_AMU_SHIFT      U(20)
+#define ID_PFR0_AMU_LENGTH     U(4)
+#define ID_PFR0_AMU_MASK       U(0xf)
+
+/* ID_PFR0 DIT definitions */
+#define ID_PFR0_DIT_SHIFT      U(24)
+#define ID_PFR0_DIT_LENGTH     U(4)
+#define ID_PFR0_DIT_MASK       U(0xf)
+#define ID_PFR0_DIT_SUPPORTED  (U(1) << ID_PFR0_DIT_SHIFT)
+
+/* ID_PFR1 definitions */
+#define ID_PFR1_VIRTEXT_SHIFT  U(12)
+#define ID_PFR1_VIRTEXT_MASK   U(0xf)
+#define GET_VIRT_EXT(id)       (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
+                                & ID_PFR1_VIRTEXT_MASK)
+#define ID_PFR1_GIC_SHIFT      U(28)
+#define ID_PFR1_GIC_MASK       U(0xf)
+
+/* SCTLR definitions */
+#define SCTLR_RES1_DEF         ((U(1) << 23) | (U(1) << 22) | (U(1) << 4) | \
+                                (U(1) << 3))
+#if ARM_ARCH_MAJOR == 7
+#define SCTLR_RES1             SCTLR_RES1_DEF
+#else
+#define SCTLR_RES1             (SCTLR_RES1_DEF | (U(1) << 11))
+#endif
+#define SCTLR_M_BIT            (U(1) << 0)
+#define SCTLR_A_BIT            (U(1) << 1)
+#define SCTLR_C_BIT            (U(1) << 2)
+#define SCTLR_CP15BEN_BIT      (U(1) << 5)
+#define SCTLR_ITD_BIT          (U(1) << 7)
+#define SCTLR_Z_BIT            (U(1) << 11)
+#define SCTLR_I_BIT            (U(1) << 12)
+#define SCTLR_V_BIT            (U(1) << 13)
+#define SCTLR_RR_BIT           (U(1) << 14)
+#define SCTLR_NTWI_BIT         (U(1) << 16)
+#define SCTLR_NTWE_BIT         (U(1) << 18)
+#define SCTLR_WXN_BIT          (U(1) << 19)
+#define SCTLR_UWXN_BIT         (U(1) << 20)
+#define SCTLR_EE_BIT           (U(1) << 25)
+#define SCTLR_TRE_BIT          (U(1) << 28)
+#define SCTLR_AFE_BIT          (U(1) << 29)
+#define SCTLR_TE_BIT           (U(1) << 30)
+#define SCTLR_DSSBS_BIT                (U(1) << 31)
+#define SCTLR_RESET_VAL         (SCTLR_RES1 | SCTLR_NTWE_BIT |         \
+                               SCTLR_NTWI_BIT | SCTLR_CP15BEN_BIT)
+
+/* SDCR definitions */
+#define SDCR_SPD(x)            ((x) << 14)
+#define SDCR_SPD_LEGACY                U(0x0)
+#define SDCR_SPD_DISABLE       U(0x2)
+#define SDCR_SPD_ENABLE                U(0x3)
+#define SDCR_RESET_VAL         U(0x0)
+
+/* HSCTLR definitions */
+#define HSCTLR_RES1    ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+                        (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+                        (U(1) << 11) | (U(1) << 4) | (U(1) << 3))
+
+#define HSCTLR_M_BIT           (U(1) << 0)
+#define HSCTLR_A_BIT           (U(1) << 1)
+#define HSCTLR_C_BIT           (U(1) << 2)
+#define HSCTLR_CP15BEN_BIT     (U(1) << 5)
+#define HSCTLR_ITD_BIT         (U(1) << 7)
+#define HSCTLR_SED_BIT         (U(1) << 8)
+#define HSCTLR_I_BIT           (U(1) << 12)
+#define HSCTLR_WXN_BIT         (U(1) << 19)
+#define HSCTLR_EE_BIT          (U(1) << 25)
+#define HSCTLR_TE_BIT          (U(1) << 30)
+
+/* CPACR definitions */
+#define CPACR_FPEN(x)          ((x) << 20)
+#define CPACR_FP_TRAP_PL0      U(0x1)
+#define CPACR_FP_TRAP_ALL      U(0x2)
+#define CPACR_FP_TRAP_NONE     U(0x3)
+
+/* SCR definitions */
+#define SCR_TWE_BIT            (U(1) << 13)
+#define SCR_TWI_BIT            (U(1) << 12)
+#define SCR_SIF_BIT            (U(1) << 9)
+#define SCR_HCE_BIT            (U(1) << 8)
+#define SCR_SCD_BIT            (U(1) << 7)
+#define SCR_NET_BIT            (U(1) << 6)
+#define SCR_AW_BIT             (U(1) << 5)
+#define SCR_FW_BIT             (U(1) << 4)
+#define SCR_EA_BIT             (U(1) << 3)
+#define SCR_FIQ_BIT            (U(1) << 2)
+#define SCR_IRQ_BIT            (U(1) << 1)
+#define SCR_NS_BIT             (U(1) << 0)
+#define SCR_VALID_BIT_MASK     U(0x33ff)
+#define SCR_RESET_VAL          U(0x0)
+
+#define GET_NS_BIT(scr)                ((scr) & SCR_NS_BIT)
+
+/* HCR definitions */
+#define HCR_TGE_BIT            (U(1) << 27)
+#define HCR_AMO_BIT            (U(1) << 5)
+#define HCR_IMO_BIT            (U(1) << 4)
+#define HCR_FMO_BIT            (U(1) << 3)
+#define HCR_RESET_VAL          U(0x0)
+
+/* CNTHCTL definitions */
+#define CNTHCTL_RESET_VAL      U(0x0)
+#define PL1PCEN_BIT            (U(1) << 1)
+#define PL1PCTEN_BIT           (U(1) << 0)
+
+/* CNTKCTL definitions */
+#define PL0PTEN_BIT            (U(1) << 9)
+#define PL0VTEN_BIT            (U(1) << 8)
+#define PL0PCTEN_BIT           (U(1) << 0)
+#define PL0VCTEN_BIT           (U(1) << 1)
+#define EVNTEN_BIT             (U(1) << 2)
+#define EVNTDIR_BIT            (U(1) << 3)
+#define EVNTI_SHIFT            U(4)
+#define EVNTI_MASK             U(0xf)
+
+/* HCPTR definitions */
+#define HCPTR_RES1             ((U(1) << 13) | (U(1) << 12) | U(0x3ff))
+#define TCPAC_BIT              (U(1) << 31)
+#define TAM_BIT                        (U(1) << 30)
+#define TTA_BIT                        (U(1) << 20)
+#define TCP11_BIT              (U(1) << 11)
+#define TCP10_BIT              (U(1) << 10)
+#define HCPTR_RESET_VAL                HCPTR_RES1
+
+/* VTTBR definitions */
+#define VTTBR_RESET_VAL                ULL(0x0)
+#define VTTBR_VMID_MASK                ULL(0xff)
+#define VTTBR_VMID_SHIFT       U(48)
+#define VTTBR_BADDR_MASK       ULL(0xffffffffffff)
+#define VTTBR_BADDR_SHIFT      U(0)
+
+/* HDCR definitions */
+#define HDCR_RESET_VAL         U(0x0)
+
+/* HSTR definitions */
+#define HSTR_RESET_VAL         U(0x0)
+
+/* CNTHP_CTL definitions */
+#define CNTHP_CTL_RESET_VAL    U(0x0)
+
+/* NSACR definitions */
+#define NSASEDIS_BIT           (U(1) << 15)
+#define NSTRCDIS_BIT           (U(1) << 20)
+#define NSACR_CP11_BIT         (U(1) << 11)
+#define NSACR_CP10_BIT         (U(1) << 10)
+#define NSACR_IMP_DEF_MASK     (U(0x7) << 16)
+#define NSACR_ENABLE_FP_ACCESS (NSACR_CP11_BIT | NSACR_CP10_BIT)
+#define NSACR_RESET_VAL                U(0x0)
+
+/* CPACR definitions */
+#define ASEDIS_BIT             (U(1) << 31)
+#define TRCDIS_BIT             (U(1) << 28)
+#define CPACR_CP11_SHIFT       U(22)
+#define CPACR_CP10_SHIFT       U(20)
+#define CPACR_ENABLE_FP_ACCESS ((U(0x3) << CPACR_CP11_SHIFT) |\
+                                (U(0x3) << CPACR_CP10_SHIFT))
+#define CPACR_RESET_VAL         U(0x0)
+
+/* FPEXC definitions */
+#define FPEXC_RES1             ((U(1) << 10) | (U(1) << 9) | (U(1) << 8))
+#define FPEXC_EN_BIT           (U(1) << 30)
+#define FPEXC_RESET_VAL                FPEXC_RES1
+
+/* SPSR/CPSR definitions */
+#define SPSR_FIQ_BIT           (U(1) << 0)
+#define SPSR_IRQ_BIT           (U(1) << 1)
+#define SPSR_ABT_BIT           (U(1) << 2)
+#define SPSR_AIF_SHIFT         U(6)
+#define SPSR_AIF_MASK          U(0x7)
+
+#define SPSR_E_SHIFT           U(9)
+#define SPSR_E_MASK            U(0x1)
+#define SPSR_E_LITTLE          U(0)
+#define SPSR_E_BIG             U(1)
+
+#define SPSR_T_SHIFT           U(5)
+#define SPSR_T_MASK            U(0x1)
+#define SPSR_T_ARM             U(0)
+#define SPSR_T_THUMB           U(1)
+
+#define SPSR_MODE_SHIFT                U(0)
+#define SPSR_MODE_MASK         U(0x7)
+
+#define DISABLE_ALL_EXCEPTIONS \
+               (SPSR_FIQ_BIT | SPSR_IRQ_BIT | SPSR_ABT_BIT)
+
+#define CPSR_DIT_BIT           (U(1) << 21)
+/*
+ * TTBCR definitions
+ */
+#define TTBCR_EAE_BIT          (U(1) << 31)
+
+#define TTBCR_SH1_NON_SHAREABLE                (U(0x0) << 28)
+#define TTBCR_SH1_OUTER_SHAREABLE      (U(0x2) << 28)
+#define TTBCR_SH1_INNER_SHAREABLE      (U(0x3) << 28)
+
+#define TTBCR_RGN1_OUTER_NC    (U(0x0) << 26)
+#define TTBCR_RGN1_OUTER_WBA   (U(0x1) << 26)
+#define TTBCR_RGN1_OUTER_WT    (U(0x2) << 26)
+#define TTBCR_RGN1_OUTER_WBNA  (U(0x3) << 26)
+
+#define TTBCR_RGN1_INNER_NC    (U(0x0) << 24)
+#define TTBCR_RGN1_INNER_WBA   (U(0x1) << 24)
+#define TTBCR_RGN1_INNER_WT    (U(0x2) << 24)
+#define TTBCR_RGN1_INNER_WBNA  (U(0x3) << 24)
+
+#define TTBCR_EPD1_BIT         (U(1) << 23)
+#define TTBCR_A1_BIT           (U(1) << 22)
+
+#define TTBCR_T1SZ_SHIFT       U(16)
+#define TTBCR_T1SZ_MASK                U(0x7)
+#define TTBCR_TxSZ_MIN         U(0)
+#define TTBCR_TxSZ_MAX         U(7)
+
+#define TTBCR_SH0_NON_SHAREABLE                (U(0x0) << 12)
+#define TTBCR_SH0_OUTER_SHAREABLE      (U(0x2) << 12)
+#define TTBCR_SH0_INNER_SHAREABLE      (U(0x3) << 12)
+
+#define TTBCR_RGN0_OUTER_NC    (U(0x0) << 10)
+#define TTBCR_RGN0_OUTER_WBA   (U(0x1) << 10)
+#define TTBCR_RGN0_OUTER_WT    (U(0x2) << 10)
+#define TTBCR_RGN0_OUTER_WBNA  (U(0x3) << 10)
+
+#define TTBCR_RGN0_INNER_NC    (U(0x0) << 8)
+#define TTBCR_RGN0_INNER_WBA   (U(0x1) << 8)
+#define TTBCR_RGN0_INNER_WT    (U(0x2) << 8)
+#define TTBCR_RGN0_INNER_WBNA  (U(0x3) << 8)
+
+#define TTBCR_EPD0_BIT         (U(1) << 7)
+#define TTBCR_T0SZ_SHIFT       U(0)
+#define TTBCR_T0SZ_MASK                U(0x7)
+
+/*
+ * HTCR definitions
+ */
+#define HTCR_RES1                      ((U(1) << 31) | (U(1) << 23))
+
+#define HTCR_SH0_NON_SHAREABLE         (U(0x0) << 12)
+#define HTCR_SH0_OUTER_SHAREABLE       (U(0x2) << 12)
+#define HTCR_SH0_INNER_SHAREABLE       (U(0x3) << 12)
+
+#define HTCR_RGN0_OUTER_NC     (U(0x0) << 10)
+#define HTCR_RGN0_OUTER_WBA    (U(0x1) << 10)
+#define HTCR_RGN0_OUTER_WT     (U(0x2) << 10)
+#define HTCR_RGN0_OUTER_WBNA   (U(0x3) << 10)
+
+#define HTCR_RGN0_INNER_NC     (U(0x0) << 8)
+#define HTCR_RGN0_INNER_WBA    (U(0x1) << 8)
+#define HTCR_RGN0_INNER_WT     (U(0x2) << 8)
+#define HTCR_RGN0_INNER_WBNA   (U(0x3) << 8)
+
+#define HTCR_T0SZ_SHIFT                U(0)
+#define HTCR_T0SZ_MASK         U(0x7)
+
+#define MODE_RW_SHIFT          U(0x4)
+#define MODE_RW_MASK           U(0x1)
+#define MODE_RW_32             U(0x1)
+
+#define MODE32_SHIFT           U(0)
+#define MODE32_MASK            U(0x1f)
+#define MODE32_usr             U(0x10)
+#define MODE32_fiq             U(0x11)
+#define MODE32_irq             U(0x12)
+#define MODE32_svc             U(0x13)
+#define MODE32_mon             U(0x16)
+#define MODE32_abt             U(0x17)
+#define MODE32_hyp             U(0x1a)
+#define MODE32_und             U(0x1b)
+#define MODE32_sys             U(0x1f)
+
+#define GET_M32(mode)          (((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_MODE32(mode, isa, endian, aif)            \
+       (MODE_RW_32 << MODE_RW_SHIFT |                  \
+       ((mode) & MODE32_MASK) << MODE32_SHIFT |        \
+       ((isa) & SPSR_T_MASK) << SPSR_T_SHIFT |         \
+       ((endian) & SPSR_E_MASK) << SPSR_E_SHIFT |      \
+       ((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT)
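
For illustration, not part of the patch: SPSR_MODE32() composes an SPSR image from its mode, instruction set, endianness and A/I/F fields. A minimal sketch building a value for entry into SVC mode, ARM (not Thumb) state, little-endian, with asynchronous abort, IRQ and FIQ masked:

    uint32_t spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
                                DISABLE_ALL_EXCEPTIONS);
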
+
+/*
+ * TTBR definitions
+ */
+#define TTBR_CNP_BIT           ULL(0x1)
+
+/*
+ * CTR definitions
+ */
+#define CTR_CWG_SHIFT          U(24)
+#define CTR_CWG_MASK           U(0xf)
+#define CTR_ERG_SHIFT          U(20)
+#define CTR_ERG_MASK           U(0xf)
+#define CTR_DMINLINE_SHIFT     U(16)
+#define CTR_DMINLINE_WIDTH     U(4)
+#define CTR_DMINLINE_MASK      ((U(1) << 4) - U(1))
+#define CTR_L1IP_SHIFT         U(14)
+#define CTR_L1IP_MASK          U(0x3)
+#define CTR_IMINLINE_SHIFT     U(0)
+#define CTR_IMINLINE_MASK      U(0xf)
+
+#define MAX_CACHE_LINE_SIZE    U(0x800) /* 2KB */
+
+/* PMCR definitions */
+#define PMCR_N_SHIFT           U(11)
+#define PMCR_N_MASK            U(0x1f)
+#define PMCR_N_BITS            (PMCR_N_MASK << PMCR_N_SHIFT)
+#define PMCR_LC_BIT            (U(1) << 6)
+#define PMCR_DP_BIT            (U(1) << 5)
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT                U(0)
+#define TLBI_ADDR_MASK         U(0xFFFFF000)
+#define TLBI_ADDR(x)           (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
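
Illustrative use, with 'va' a hypothetical virtual address: the mask keeps only the page-aligned part of the address, which is the form expected by TLBI-by-VA operations such as the tlbimvaais() helper generated in arch_helpers.h below:

    tlbimvaais(TLBI_ADDR(va)); /* invalidate entries for 'va', Inner Shareable */
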
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTCTLBase Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+#define CNTCTLBASE_CNTFRQ      U(0x0)
+#define CNTNSAR                        U(0x4)
+#define CNTNSAR_NS_SHIFT(x)    (x)
+
+#define CNTACR_BASE(x)         (U(0x40) + ((x) << 2))
+#define CNTACR_RPCT_SHIFT      U(0x0)
+#define CNTACR_RVCT_SHIFT      U(0x1)
+#define CNTACR_RFRQ_SHIFT      U(0x2)
+#define CNTACR_RVOFF_SHIFT     U(0x3)
+#define CNTACR_RWVT_SHIFT      U(0x4)
+#define CNTACR_RWPT_SHIFT      U(0x5)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTBaseN Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+/* Physical Count register. */
+#define CNTPCT_LO              U(0x0)
+/* Counter Frequency register. */
+#define CNTBASEN_CNTFRQ                U(0x10)
+/* Physical Timer CompareValue register. */
+#define CNTP_CVAL_LO           U(0x20)
+/* Physical Timer Control register. */
+#define CNTP_CTL               U(0x2c)
+
+/* Physical timer control register bit fields shifts and masks */
+#define CNTP_CTL_ENABLE_SHIFT   0
+#define CNTP_CTL_IMASK_SHIFT    1
+#define CNTP_CTL_ISTATUS_SHIFT  2
+
+#define CNTP_CTL_ENABLE_MASK    U(1)
+#define CNTP_CTL_IMASK_MASK     U(1)
+#define CNTP_CTL_ISTATUS_MASK   U(1)
+
+/* MAIR macros */
+#define MAIR0_ATTR_SET(attr, index)    ((attr) << ((index) << U(3)))
+#define MAIR1_ATTR_SET(attr, index)    ((attr) << (((index) - U(3)) << U(3)))
+
+/* System register defines. The format is: coproc, opc1, CRn, CRm, opc2 */
+#define SCR            p15, 0, c1, c1, 0
+#define SCTLR          p15, 0, c1, c0, 0
+#define ACTLR          p15, 0, c1, c0, 1
+#define SDCR           p15, 0, c1, c3, 1
+#define MPIDR          p15, 0, c0, c0, 5
+#define MIDR           p15, 0, c0, c0, 0
+#define HVBAR          p15, 4, c12, c0, 0
+#define VBAR           p15, 0, c12, c0, 0
+#define MVBAR          p15, 0, c12, c0, 1
+#define NSACR          p15, 0, c1, c1, 2
+#define CPACR          p15, 0, c1, c0, 2
+#define DCCIMVAC       p15, 0, c7, c14, 1
+#define DCCMVAC                p15, 0, c7, c10, 1
+#define DCIMVAC                p15, 0, c7, c6, 1
+#define DCCISW         p15, 0, c7, c14, 2
+#define DCCSW          p15, 0, c7, c10, 2
+#define DCISW          p15, 0, c7, c6, 2
+#define CTR            p15, 0, c0, c0, 1
+#define CNTFRQ         p15, 0, c14, c0, 0
+#define ID_PFR0                p15, 0, c0, c1, 0
+#define ID_PFR1                p15, 0, c0, c1, 1
+#define MAIR0          p15, 0, c10, c2, 0
+#define MAIR1          p15, 0, c10, c2, 1
+#define TTBCR          p15, 0, c2, c0, 2
+#define TTBR0          p15, 0, c2, c0, 0
+#define TTBR1          p15, 0, c2, c0, 1
+#define TLBIALL                p15, 0, c8, c7, 0
+#define TLBIALLH       p15, 4, c8, c7, 0
+#define TLBIALLIS      p15, 0, c8, c3, 0
+#define TLBIMVA                p15, 0, c8, c7, 1
+#define TLBIMVAA       p15, 0, c8, c7, 3
+#define TLBIMVAAIS     p15, 0, c8, c3, 3
+#define TLBIMVAHIS     p15, 4, c8, c3, 1
+#define BPIALLIS       p15, 0, c7, c1, 6
+#define BPIALL         p15, 0, c7, c5, 6
+#define ICIALLU                p15, 0, c7, c5, 0
+#define HSCTLR         p15, 4, c1, c0, 0
+#define HCR            p15, 4, c1, c1, 0
+#define HCPTR          p15, 4, c1, c1, 2
+#define HSTR           p15, 4, c1, c1, 3
+#define CNTHCTL                p15, 4, c14, c1, 0
+#define CNTKCTL                p15, 0, c14, c1, 0
+#define VPIDR          p15, 4, c0, c0, 0
+#define VMPIDR         p15, 4, c0, c0, 5
+#define ISR            p15, 0, c12, c1, 0
+#define CLIDR          p15, 1, c0, c0, 1
+#define CSSELR         p15, 2, c0, c0, 0
+#define CCSIDR         p15, 1, c0, c0, 0
+#define HTCR           p15, 4, c2, c0, 2
+#define HMAIR0         p15, 4, c10, c2, 0
+#define ATS1CPR                p15, 0, c7, c8, 0
+#define ATS1HR         p15, 4, c7, c8, 0
+#define DBGOSDLR       p14, 0, c1, c3, 4
+
+/* Debug register defines. The format is: coproc, opc1, CRn, CRm, opc2 */
+#define HDCR           p15, 4, c1, c1, 1
+#define PMCR           p15, 0, c9, c12, 0
+#define CNTHP_TVAL     p15, 4, c14, c2, 0
+#define CNTHP_CTL      p15, 4, c14, c2, 1
+
+/* AArch32 coproc registers for 32bit MMU descriptor support */
+#define PRRR           p15, 0, c10, c2, 0
+#define NMRR           p15, 0, c10, c2, 1
+#define DACR           p15, 0, c3, c0, 0
+
+/* GICv3 CPU Interface system register defines. The format is: coproc, opc1, CRn, CRm, opc2 */
+#define ICC_IAR1       p15, 0, c12, c12, 0
+#define ICC_IAR0       p15, 0, c12, c8, 0
+#define ICC_EOIR1      p15, 0, c12, c12, 1
+#define ICC_EOIR0      p15, 0, c12, c8, 1
+#define ICC_HPPIR1     p15, 0, c12, c12, 2
+#define ICC_HPPIR0     p15, 0, c12, c8, 2
+#define ICC_BPR1       p15, 0, c12, c12, 3
+#define ICC_BPR0       p15, 0, c12, c8, 3
+#define ICC_DIR                p15, 0, c12, c11, 1
+#define ICC_PMR                p15, 0, c4, c6, 0
+#define ICC_RPR                p15, 0, c12, c11, 3
+#define ICC_CTLR       p15, 0, c12, c12, 4
+#define ICC_MCTLR      p15, 6, c12, c12, 4
+#define ICC_SRE                p15, 0, c12, c12, 5
+#define ICC_HSRE       p15, 4, c12, c9, 5
+#define ICC_MSRE       p15, 6, c12, c12, 5
+#define ICC_IGRPEN0    p15, 0, c12, c12, 6
+#define ICC_IGRPEN1    p15, 0, c12, c12, 7
+#define ICC_MGRPEN1    p15, 6, c12, c12, 7
+
+/* 64 bit system register defines. The format is: coproc, opc1, CRm */
+#define TTBR0_64       p15, 0, c2
+#define TTBR1_64       p15, 1, c2
+#define CNTVOFF_64     p15, 4, c14
+#define VTTBR_64       p15, 6, c2
+#define CNTPCT_64      p15, 0, c14
+#define HTTBR_64       p15, 4, c2
+#define CNTHP_CVAL_64  p15, 6, c14
+#define PAR_64         p15, 0, c7
+
+/* 64 bit GICv3 CPU Interface system register defines. The format is: coproc, opc1, CRm */
+#define ICC_SGI1R_EL1_64       p15, 0, c12
+#define ICC_ASGI1R_EL1_64      p15, 1, c12
+#define ICC_SGI0R_EL1_64       p15, 2, c12
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE                U(0x0)
+#define MAIR_DEV_nGnRE         U(0x4)
+#define MAIR_DEV_nGRE          U(0x8)
+#define MAIR_DEV_GRE           U(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ *  WT:         Write Through
+ *  WB:         Write Back
+ *  NC:         Non-Cacheable
+ *
+ * Transient Hint
+ *  NTR: Non-Transient
+ *  TR:         Transient
+ *
+ * Allocation Policy
+ *  RA:         Read Allocate
+ *  WA:         Write Allocate
+ *  RWA: Read and Write Allocate
+ *  NA:         No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA     U(0x1)
+#define MAIR_NORM_WT_TR_RA     U(0x2)
+#define MAIR_NORM_WT_TR_RWA    U(0x3)
+#define MAIR_NORM_NC           U(0x4)
+#define MAIR_NORM_WB_TR_WA     U(0x5)
+#define MAIR_NORM_WB_TR_RA     U(0x6)
+#define MAIR_NORM_WB_TR_RWA    U(0x7)
+#define MAIR_NORM_WT_NTR_NA    U(0x8)
+#define MAIR_NORM_WT_NTR_WA    U(0x9)
+#define MAIR_NORM_WT_NTR_RA    U(0xa)
+#define MAIR_NORM_WT_NTR_RWA   U(0xb)
+#define MAIR_NORM_WB_NTR_NA    U(0xc)
+#define MAIR_NORM_WB_NTR_WA    U(0xd)
+#define MAIR_NORM_WB_NTR_RA    U(0xe)
+#define MAIR_NORM_WB_NTR_RWA   U(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT  U(4)
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer)  \
+               ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
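
As a sketch, not part of the patch: a Normal Write-Back, Non-Transient, Read/Write-Allocate attribute for both inner and outer cacheability can be composed and installed with the accessors from arch_helpers.h; the attribute index 0 used here is arbitrary:

    uint32_t attr = MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA,
                                            MAIR_NORM_WB_NTR_RWA);
    write_mair0(MAIR0_ATTR_SET(attr, U(0)));
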
+
+/* PAR fields */
+#define PAR_F_SHIFT    U(0)
+#define PAR_F_MASK     ULL(0x1)
+#define PAR_ADDR_SHIFT U(12)
+#define PAR_ADDR_MASK  (BIT_64(40) - ULL(1)) /* 40-bits-wide page address */
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for ARMv8.4 onwards
+ ******************************************************************************/
+#define AMCR           p15, 0, c13, c2, 0
+#define AMCFGR         p15, 0, c13, c2, 1
+#define AMCGCR         p15, 0, c13, c2, 2
+#define AMUSERENR      p15, 0, c13, c2, 3
+#define AMCNTENCLR0    p15, 0, c13, c2, 4
+#define AMCNTENSET0    p15, 0, c13, c2, 5
+#define AMCNTENCLR1    p15, 0, c13, c3, 0
+#define AMCNTENSET1    p15, 0, c13, c3, 1
+
+/* Activity Monitor Group 0 Event Counter Registers */
+#define AMEVCNTR00     p15, 0, c0
+#define AMEVCNTR01     p15, 1, c0
+#define AMEVCNTR02     p15, 2, c0
+#define AMEVCNTR03     p15, 3, c0
+
+/* Activity Monitor Group 0 Event Type Registers */
+#define AMEVTYPER00    p15, 0, c13, c6, 0
+#define AMEVTYPER01    p15, 0, c13, c6, 1
+#define AMEVTYPER02    p15, 0, c13, c6, 2
+#define AMEVTYPER03    p15, 0, c13, c6, 3
+
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10     p15, 0, c4
+#define AMEVCNTR11     p15, 1, c4
+#define AMEVCNTR12     p15, 2, c4
+#define AMEVCNTR13     p15, 3, c4
+#define AMEVCNTR14     p15, 4, c4
+#define AMEVCNTR15     p15, 5, c4
+#define AMEVCNTR16     p15, 6, c4
+#define AMEVCNTR17     p15, 7, c4
+#define AMEVCNTR18     p15, 0, c5
+#define AMEVCNTR19     p15, 1, c5
+#define AMEVCNTR1A     p15, 2, c5
+#define AMEVCNTR1B     p15, 3, c5
+#define AMEVCNTR1C     p15, 4, c5
+#define AMEVCNTR1D     p15, 5, c5
+#define AMEVCNTR1E     p15, 6, c5
+#define AMEVCNTR1F     p15, 7, c5
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10    p15, 0, c13, c14, 0
+#define AMEVTYPER11    p15, 0, c13, c14, 1
+#define AMEVTYPER12    p15, 0, c13, c14, 2
+#define AMEVTYPER13    p15, 0, c13, c14, 3
+#define AMEVTYPER14    p15, 0, c13, c14, 4
+#define AMEVTYPER15    p15, 0, c13, c14, 5
+#define AMEVTYPER16    p15, 0, c13, c14, 6
+#define AMEVTYPER17    p15, 0, c13, c14, 7
+#define AMEVTYPER18    p15, 0, c13, c15, 0
+#define AMEVTYPER19    p15, 0, c13, c15, 1
+#define AMEVTYPER1A    p15, 0, c13, c15, 2
+#define AMEVTYPER1B    p15, 0, c13, c15, 3
+#define AMEVTYPER1C    p15, 0, c13, c15, 4
+#define AMEVTYPER1D    p15, 0, c13, c15, 5
+#define AMEVTYPER1E    p15, 0, c13, c15, 6
+#define AMEVTYPER1F    p15, 0, c13, c15, 7
+
+#endif /* ARCH_H */
diff --git a/include/arch/aarch32/arch_helpers.h b/include/arch/aarch32/arch_helpers.h
new file mode 100644
index 0000000..a6fe14f
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_HELPERS_H
+#define ARCH_HELPERS_H
+
+#include <arch.h>
+#include <cdefs.h>
+#include <stdint.h>
+#include <string.h>
+
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_COPROCR_WRITE_FUNC(_name, coproc, opc1, CRn, CRm, opc2)        \
+static inline void write_## _name(u_register_t v)                      \
+{                                                                      \
+       __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_COPROCR_READ_FUNC(_name, coproc, opc1, CRn, CRm, opc2) \
+static inline u_register_t read_ ## _name(void)                                \
+{                                                                      \
+       u_register_t v;                                                 \
+       __asm__ volatile ("mrc "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : "=r" (v));\
+       return v;                                                       \
+}
+
+/*
+ * The undocumented %Q and %R extended asm operand modifiers are used to
+ * implement the 64 bit `mrrc` and `mcrr` accessors below.
+ */
+
+#define _DEFINE_COPROCR_WRITE_FUNC_64(_name, coproc, opc1, CRm)                \
+static inline void write64_## _name(uint64_t v)                                \
+{                                                                      \
+       __asm__ volatile ("mcrr "#coproc","#opc1", %Q0, %R0,"#CRm : : "r" (v));\
+}
+
+#define _DEFINE_COPROCR_READ_FUNC_64(_name, coproc, opc1, CRm)         \
+static inline uint64_t read64_## _name(void)                           \
+{                                                                      \
+       uint64_t v;                                                     \
+       __asm__ volatile ("mrrc "#coproc","#opc1", %Q0, %R0,"#CRm : "=r" (v));\
+       return v;                                                       \
+}
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)                     \
+static inline u_register_t read_ ## _name(void)                                \
+{                                                                      \
+       u_register_t v;                                                 \
+       __asm__ volatile ("mrs %0, " #_reg_name : "=r" (v));            \
+       return v;                                                       \
+}
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)                    \
+static inline void write_ ## _name(u_register_t v)                     \
+{                                                                      \
+       __asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v));        \
+}
+
+#define _DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _reg_name)              \
+static inline void write_ ## _name(const u_register_t v)               \
+{                                                                      \
+       __asm__ volatile ("msr " #_reg_name ", %0" : : "i" (v));        \
+}
+
+/* Define read function for coproc register */
+#define DEFINE_COPROCR_READ_FUNC(_name, ...)                           \
+       _DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__)
+
+/* Define write function for coproc register */
+#define DEFINE_COPROCR_WRITE_FUNC(_name, ...)                          \
+       _DEFINE_COPROCR_WRITE_FUNC(_name, __VA_ARGS__)
+
+/* Define read & write function for coproc register */
+#define DEFINE_COPROCR_RW_FUNCS(_name, ...)                            \
+       _DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__)                   \
+       _DEFINE_COPROCR_WRITE_FUNC(_name, __VA_ARGS__)
+
+/* Define 64 bit read function for coproc register */
+#define DEFINE_COPROCR_READ_FUNC_64(_name, ...)                        \
+       _DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__)
+
+/* Define 64 bit write function for coproc register */
+#define DEFINE_COPROCR_WRITE_FUNC_64(_name, ...)                       \
+       _DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__)
+
+/* Define 64 bit read & write function for coproc register */
+#define DEFINE_COPROCR_RW_FUNCS_64(_name, ...)                                 \
+       _DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__)                \
+       _DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name)                                  \
+       _DEFINE_SYSREG_READ_FUNC(_name, _name)                          \
+       _DEFINE_SYSREG_WRITE_FUNC(_name, _name)
+
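To make the token pasting concrete, this is approximately what DEFINE_COPROCR_RW_FUNCS(sctlr, SCTLR) expands to, given the SCTLR encoding `p15, 0, c1, c0, 0` from arch.h (an illustrative expansion, not literal preprocessor output):

    static inline u_register_t read_sctlr(void)
    {
            u_register_t v;
            __asm__ volatile ("mrc p15,0,%0,c1,c0,0" : "=r" (v));
            return v;
    }

    static inline void write_sctlr(u_register_t v)
    {
            __asm__ volatile ("mcr p15,0,%0,c1,c0,0" : : "r" (v));
    }
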
+/**********************************************************************
+ * Macros to create inline functions for TLBI operations
+ *********************************************************************/
+
+#define _DEFINE_TLBIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2)         \
+static inline void tlbi##_op(void)                                     \
+{                                                                      \
+       u_register_t v = 0;                                             \
+       __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_BPIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2)          \
+static inline void bpi##_op(void)                                      \
+{                                                                      \
+       u_register_t v = 0;                                             \
+       __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+#define _DEFINE_TLBIOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2)   \
+static inline void tlbi##_op(u_register_t v)                           \
+{                                                                      \
+       __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+/* Define function for simple TLBI operation */
+#define DEFINE_TLBIOP_FUNC(_op, ...)                                   \
+       _DEFINE_TLBIOP_FUNC(_op, __VA_ARGS__)
+
+/* Define function for TLBI operation with register parameter */
+#define DEFINE_TLBIOP_PARAM_FUNC(_op, ...)                             \
+       _DEFINE_TLBIOP_PARAM_FUNC(_op, __VA_ARGS__)
+
+/* Define function for simple BPI operation */
+#define DEFINE_BPIOP_FUNC(_op, ...)                                    \
+       _DEFINE_BPIOP_FUNC(_op, __VA_ARGS__)
+
+/**********************************************************************
+ * Macros to create inline functions for DC operations
+ *********************************************************************/
+#define _DEFINE_DCOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2)     \
+static inline void dc##_op(u_register_t v)                             \
+{                                                                      \
+       __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
+}
+
+/* Define function for DC operation with register parameter */
+#define DEFINE_DCOP_PARAM_FUNC(_op, ...)                               \
+       _DEFINE_DCOP_PARAM_FUNC(_op, __VA_ARGS__)
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+/* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op)                                         \
+static inline void _op(void)                                           \
+{                                                                      \
+       __asm__ (#_op);                                                 \
+}
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type)                             \
+static inline void _op ## _type(void)                                  \
+{                                                                      \
+       __asm__ (#_op " " #_type);                                      \
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type)                       \
+static inline void _op ## _type(u_register_t v)                                \
+{                                                                      \
+       __asm__ (#_op " " #_type ", %0" : : "r" (v));                  \
+}
+
+void flush_dcache_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+
+void dcsw_op_louis(u_register_t op_type);
+void dcsw_op_all(u_register_t op_type);
+
+void disable_mmu_secure(void);
+void disable_mmu_icache_secure(void);
+
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+
+/* dmb ld is not valid for armv7/thumb machines */
+#if ARM_ARCH_MAJOR != 7
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
+#endif
+
+DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
+DEFINE_SYSOP_FUNC(isb)
+
+void __dead2 smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
+                uint32_t r4, uint32_t r5, uint32_t r6, uint32_t r7);
+
+DEFINE_SYSREG_RW_FUNCS(spsr)
+DEFINE_SYSREG_RW_FUNCS(cpsr)
+
+/*******************************************************************************
+ * System register accessor prototypes
+ ******************************************************************************/
+DEFINE_COPROCR_READ_FUNC(mpidr, MPIDR)
+DEFINE_COPROCR_READ_FUNC(midr, MIDR)
+DEFINE_COPROCR_READ_FUNC(id_pfr0, ID_PFR0)
+DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
+DEFINE_COPROCR_READ_FUNC(isr, ISR)
+DEFINE_COPROCR_READ_FUNC(clidr, CLIDR)
+DEFINE_COPROCR_READ_FUNC_64(cntpct, CNTPCT_64)
+
+DEFINE_COPROCR_RW_FUNCS(scr, SCR)
+DEFINE_COPROCR_RW_FUNCS(ctr, CTR)
+DEFINE_COPROCR_RW_FUNCS(sctlr, SCTLR)
+DEFINE_COPROCR_RW_FUNCS(actlr, ACTLR)
+DEFINE_COPROCR_RW_FUNCS(hsctlr, HSCTLR)
+DEFINE_COPROCR_RW_FUNCS(hcr, HCR)
+DEFINE_COPROCR_RW_FUNCS(hcptr, HCPTR)
+DEFINE_COPROCR_RW_FUNCS(cntfrq, CNTFRQ)
+DEFINE_COPROCR_RW_FUNCS(cnthctl, CNTHCTL)
+DEFINE_COPROCR_RW_FUNCS(mair0, MAIR0)
+DEFINE_COPROCR_RW_FUNCS(mair1, MAIR1)
+DEFINE_COPROCR_RW_FUNCS(hmair0, HMAIR0)
+DEFINE_COPROCR_RW_FUNCS(ttbcr, TTBCR)
+DEFINE_COPROCR_RW_FUNCS(htcr, HTCR)
+DEFINE_COPROCR_RW_FUNCS(ttbr0, TTBR0)
+DEFINE_COPROCR_RW_FUNCS_64(ttbr0, TTBR0_64)
+DEFINE_COPROCR_RW_FUNCS(ttbr1, TTBR1)
+DEFINE_COPROCR_RW_FUNCS_64(httbr, HTTBR_64)
+DEFINE_COPROCR_RW_FUNCS(vpidr, VPIDR)
+DEFINE_COPROCR_RW_FUNCS(vmpidr, VMPIDR)
+DEFINE_COPROCR_RW_FUNCS_64(vttbr, VTTBR_64)
+DEFINE_COPROCR_RW_FUNCS_64(ttbr1, TTBR1_64)
+DEFINE_COPROCR_RW_FUNCS_64(cntvoff, CNTVOFF_64)
+DEFINE_COPROCR_RW_FUNCS(csselr, CSSELR)
+DEFINE_COPROCR_RW_FUNCS(hstr, HSTR)
+DEFINE_COPROCR_RW_FUNCS(cnthp_ctl_el2, CNTHP_CTL)
+DEFINE_COPROCR_RW_FUNCS(cnthp_tval_el2, CNTHP_TVAL)
+DEFINE_COPROCR_RW_FUNCS_64(cnthp_cval_el2, CNTHP_CVAL_64)
+
+#define get_cntp_ctl_enable(x)  (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
+                                        CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x)   (((x) >> CNTP_CTL_IMASK_SHIFT) & \
+                                        CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
+                                        CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x)  ((x) |= U(1) << CNTP_CTL_ENABLE_SHIFT)
+#define set_cntp_ctl_imask(x)   ((x) |= U(1) << CNTP_CTL_IMASK_SHIFT)
+
+#define clr_cntp_ctl_enable(x)  ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x)   ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
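
A possible usage pattern for these field helpers; 'ctl' stands in for a value read from a CNTBaseN frame's CNTP_CTL register, and the write-back to the frame is left out:

    uint32_t ctl = 0;          /* hypothetical value read from the frame */
    set_cntp_ctl_enable(ctl);  /* start the timer */
    clr_cntp_ctl_imask(ctl);   /* unmask its interrupt */
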
+
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el1, ICC_SRE)
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el2, ICC_HSRE)
+DEFINE_COPROCR_RW_FUNCS(icc_sre_el3, ICC_MSRE)
+DEFINE_COPROCR_RW_FUNCS(icc_pmr_el1, ICC_PMR)
+DEFINE_COPROCR_RW_FUNCS(icc_rpr_el1, ICC_RPR)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el3, ICC_MGRPEN1)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el1, ICC_IGRPEN1)
+DEFINE_COPROCR_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0)
+DEFINE_COPROCR_RW_FUNCS(icc_hppir0_el1, ICC_HPPIR0)
+DEFINE_COPROCR_RW_FUNCS(icc_hppir1_el1, ICC_HPPIR1)
+DEFINE_COPROCR_RW_FUNCS(icc_iar0_el1, ICC_IAR0)
+DEFINE_COPROCR_RW_FUNCS(icc_iar1_el1, ICC_IAR1)
+DEFINE_COPROCR_RW_FUNCS(icc_eoir0_el1, ICC_EOIR0)
+DEFINE_COPROCR_RW_FUNCS(icc_eoir1_el1, ICC_EOIR1)
+DEFINE_COPROCR_RW_FUNCS_64(icc_sgi0r_el1, ICC_SGI0R_EL1_64)
+DEFINE_COPROCR_WRITE_FUNC_64(icc_sgi1r, ICC_SGI1R_EL1_64)
+
+DEFINE_COPROCR_RW_FUNCS(hdcr, HDCR)
+DEFINE_COPROCR_RW_FUNCS(cnthp_ctl, CNTHP_CTL)
+DEFINE_COPROCR_READ_FUNC(pmcr, PMCR)
+
+/*
+ * Address translation
+ */
+DEFINE_COPROCR_WRITE_FUNC(ats1cpr, ATS1CPR)
+DEFINE_COPROCR_WRITE_FUNC(ats1hr, ATS1HR)
+DEFINE_COPROCR_RW_FUNCS_64(par, PAR_64)
+
+DEFINE_COPROCR_RW_FUNCS(nsacr, NSACR)
+
+/* AArch32 coproc registers for 32bit MMU descriptor support */
+DEFINE_COPROCR_RW_FUNCS(prrr, PRRR)
+DEFINE_COPROCR_RW_FUNCS(nmrr, NMRR)
+DEFINE_COPROCR_RW_FUNCS(dacr, DACR)
+
+DEFINE_COPROCR_RW_FUNCS(amcntenset0, AMCNTENSET0)
+DEFINE_COPROCR_RW_FUNCS(amcntenset1, AMCNTENSET1)
+DEFINE_COPROCR_RW_FUNCS(amcntenclr0, AMCNTENCLR0)
+DEFINE_COPROCR_RW_FUNCS(amcntenclr1, AMCNTENCLR1)
+
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr00, AMEVCNTR00)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr01, AMEVCNTR01)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr02, AMEVCNTR02)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr03, AMEVCNTR03)
+
+/*
+ * TLBI operation prototypes
+ */
+DEFINE_TLBIOP_FUNC(all, TLBIALL)
+DEFINE_TLBIOP_FUNC(allis, TLBIALLIS)
+DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
+DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
+DEFINE_TLBIOP_PARAM_FUNC(mvaais, TLBIMVAAIS)
+DEFINE_TLBIOP_PARAM_FUNC(mvahis, TLBIMVAHIS)
+
+/*
+ * BPI operation prototypes.
+ */
+DEFINE_BPIOP_FUNC(allis, BPIALLIS)
+
+/*
+ * DC operation prototypes
+ */
+DEFINE_DCOP_PARAM_FUNC(civac, DCCIMVAC)
+DEFINE_DCOP_PARAM_FUNC(ivac, DCIMVAC)
+DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
+
+/* Previously defined accessor functions with incomplete register names */
+#define dsb()                  dsbsy()
+#define dmb()                  dmbsy()
+
+/* dmb ld is not valid for armv7/thumb machines, so alias it to dmb */
+#if ARM_ARCH_MAJOR == 7
+#define        dmbld()                 dmb()
+#endif
+
+#define IS_IN_SECURE() \
+       (GET_NS_BIT(read_scr()) == 0)
+
+#define IS_IN_HYP()    (GET_M32(read_cpsr()) == MODE32_hyp)
+#define IS_IN_SVC()    (GET_M32(read_cpsr()) == MODE32_svc)
+#define IS_IN_MON()    (GET_M32(read_cpsr()) == MODE32_mon)
+#define IS_IN_EL2()    IS_IN_HYP()
+/* If EL3 is AArch32, then secure PL1 and monitor mode correspond to EL3 */
+#define IS_IN_EL3() \
+       ((GET_M32(read_cpsr()) == MODE32_mon) ||        \
+               (IS_IN_SECURE() && (GET_M32(read_cpsr()) != MODE32_usr)))
+
+static inline unsigned int get_current_el(void)
+{
+       if (IS_IN_EL3()) {
+               return 3U;
+       } else if (IS_IN_EL2()) {
+               return 2U;
+       } else {
+               return 1U;
+       }
+}
+
+/* Macros for compatibility with AArch64 system registers */
+#define read_mpidr_el1()       read_mpidr()
+
+#define read_scr_el3()         read_scr()
+#define write_scr_el3(_v)      write_scr(_v)
+
+#define read_hcr_el2()         read_hcr()
+#define write_hcr_el2(_v)      write_hcr(_v)
+
+#define read_cpacr_el1()       read_cpacr()
+#define write_cpacr_el1(_v)    write_cpacr(_v)
+
+#define read_cntfrq_el0()      read_cntfrq()
+#define write_cntfrq_el0(_v)   write_cntfrq(_v)
+#define read_isr_el1()         read_isr()
+
+#define read_cntpct_el0()      read64_cntpct()
+
+#define read_ctr_el0()         read_ctr()
+
+#define write_icc_sgi0r_el1(_v)        write64_icc_sgi0r_el1(_v)
+
+#define read_daif()            read_cpsr()
+#define write_daif(flags)      write_cpsr(flags)
+
+#define read_cnthp_cval_el2()  read64_cnthp_cval_el2()
+#define write_cnthp_cval_el2(v)        write64_cnthp_cval_el2(v)
+
+#define read_amcntenset0_el0() read_amcntenset0()
+#define read_amcntenset1_el0() read_amcntenset1()
+
+/* Helper functions to manipulate CPSR */
+static inline void enable_irq(void)
+{
+       /*
+        * The compiler memory barrier will prevent the compiler from
+        * scheduling non-volatile memory access after the write to the
+        * register.
+        *
+        * This could happen if some initialization code issues non-volatile
+        * accesses to an area used by an interrupt handler, on the assumption
+        * that this is safe because interrupts are disabled at that point
+        * (according to program order). However, non-volatile accesses are not
+        * necessarily ordered in program order relative to volatile inline
+        * assembly statements (and volatile accesses).
+        */
+       COMPILER_BARRIER();
+       __asm__ volatile ("cpsie        i");
+       isb();
+}
+
+static inline void enable_serror(void)
+{
+       COMPILER_BARRIER();
+       __asm__ volatile ("cpsie        a");
+       isb();
+}
+
+static inline void enable_fiq(void)
+{
+       COMPILER_BARRIER();
+       __asm__ volatile ("cpsie        f");
+       isb();
+}
+
+static inline void disable_irq(void)
+{
+       COMPILER_BARRIER();
+       __asm__ volatile ("cpsid        i");
+       isb();
+}
+
+static inline void disable_serror(void)
+{
+       COMPILER_BARRIER();
+       __asm__ volatile ("cpsid        a");
+       isb();
+}
+
+static inline void disable_fiq(void)
+{
+       COMPILER_BARRIER();
+       __asm__ volatile ("cpsid        f");
+       isb();
+}
+
+#endif /* ARCH_HELPERS_H */
diff --git a/include/arch/aarch32/asm_macros.S b/include/arch/aarch32/asm_macros.S
new file mode 100644
index 0000000..c54f75c
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASM_MACROS_S
+#define ASM_MACROS_S
+
+#include <arch.h>
+#include <asm_macros_common.S>
+#include <spinlock.h>
+
+/*
+ * TLBI instruction with type specifier that implements the workaround for
+ * errata 813419 of Cortex-A57.
+ */
+#if ERRATA_A57_813419
+#define TLB_INVALIDATE(_reg, _coproc) \
+       stcopr  _reg, _coproc; \
+       dsb     ish; \
+       stcopr  _reg, _coproc
+#else
+#define TLB_INVALIDATE(_reg, _coproc) \
+       stcopr  _reg, _coproc
+#endif
+
+#define WORD_SIZE      4
+
+       /*
+        * Coprocessor register accessors
+        */
+       .macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
+       mrc     \coproc, \opc1, \reg, \CRn, \CRm, \opc2
+       .endm
+
+       .macro ldcopr16 reg1, reg2, coproc, opc1, CRm
+       mrrc    \coproc, \opc1, \reg1, \reg2, \CRm
+       .endm
+
+       .macro stcopr reg, coproc, opc1, CRn, CRm, opc2
+       mcr     \coproc, \opc1, \reg, \CRn, \CRm, \opc2
+       .endm
+
+       .macro stcopr16 reg1, reg2, coproc, opc1, CRm
+       mcrr    \coproc, \opc1, \reg1, \reg2, \CRm
+       .endm
+
+       /* Cache line size helpers */
+       .macro  dcache_line_size  reg, tmp
+       ldcopr  \tmp, CTR
+       ubfx    \tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
+       mov     \reg, #WORD_SIZE
+       lsl     \reg, \reg, \tmp
+       .endm
+
+       .macro  icache_line_size  reg, tmp
+       ldcopr  \tmp, CTR
+       and     \tmp, \tmp, #CTR_IMINLINE_MASK
+       mov     \reg, #WORD_SIZE
+       lsl     \reg, \reg, \tmp
+       .endm
+
+       /*
+        * Declare the exception vector table, enforcing it is aligned on a
+        * 32 byte boundary.
+        */
+       .macro vector_base  label
+       .section .vectors, "ax"
+       .align 5
+       \label:
+       .endm
+
+       /*
+        * This macro calculates the base address of the current CPU's
+        * multi-processor (MP) stack using the plat_my_core_pos() index, the
+        * name of the stack storage and the size of each stack.
+        * Out: r0 = physical address of stack base
+        * Clobber: r14, r1, r2
+        */
+       .macro get_my_mp_stack _name, _size
+       bl  plat_my_core_pos
+       ldr r2, =(\_name + \_size)
+       mov r1, #\_size
+       mla r0, r0, r1, r2
+       .endm
+
+       /*
+        * This macro calculates the base address of a uniprocessor (UP) stack
+        * using the name of the stack storage and the size of the stack.
+        * Out: r0 = physical address of stack base
+        */
+       .macro get_up_stack _name, _size
+       ldr r0, =(\_name + \_size)
+       .endm
+
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
+       /*
+        * ARMv7 cores without Virtualization extension do not support the
+        * eret instruction.
+        */
+       .macro eret
+       movs    pc, lr
+       .endm
+#endif
+
+#if (ARM_ARCH_MAJOR == 7)
+       /* ARMv7 does not support stl instruction */
+       .macro stl _reg, _write_lock
+       dmb
+       str     \_reg, \_write_lock
+       dsb
+       .endm
+#endif
+
+       /*
+        * Helper macro to generate the best mov/movw/movt combinations
+        * according to the value to be moved.
+        */
+       .macro mov_imm _reg, _val
+               .if ((\_val) & 0xffff0000) == 0
+                       mov     \_reg, #(\_val)
+               .else
+                       movw    \_reg, #((\_val) & 0xffff)
+                       movt    \_reg, #((\_val) >> 16)
+               .endif
+       .endm
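
For example, and purely as an illustration: `mov_imm r0, 0x12345678` expands to the movw/movt pair below, while a value whose top half is zero collapses to a single mov:

    movw    r0, #0x5678
    movt    r0, #0x1234
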
+
+       /*
+        * Macro to mark instances where we're jumping to a function and don't
+        * expect a return. To provide the function being jumped to with
+        * additional information, we use the 'bl' instruction to jump rather
+        * than 'b'.
+        *
+        * Debuggers infer the location of a call from where LR points to, which
+        * is usually the instruction after 'bl'. If this macro expansion
+        * happens to be the last location in a function, that'll cause the LR
+        * to point to a location beyond the function, thereby misleading the
+        * debugger's backtrace. We therefore insert a 'nop' after the function
+        * call for debug builds, unless the 'skip_nop' parameter is non-zero.
+        */
+       .macro no_ret _func:req, skip_nop=0
+       bl      \_func
+#if DEBUG
+       .ifeq \skip_nop
+       nop
+       .endif
+#endif
+       .endm
+
+       /*
+        * Reserve space for a spin lock in assembly file.
+        */
+       .macro define_asm_spinlock _name:req
+       .align  SPINLOCK_ASM_ALIGN
+       \_name:
+       .space  SPINLOCK_ASM_SIZE
+       .endm
+
+       /*
+        * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
+        * and the top 32 bits of `_val` into `_reg_h`.  If either the bottom
+        * or top word of `_val` is zero, the corresponding OR operation
+        * is skipped.
+        */
+       .macro orr64_imm _reg_l, _reg_h, _val
+               .if (\_val >> 32)
+                       orr \_reg_h, \_reg_h, #(\_val >> 32)
+               .endif
+               .if (\_val & 0xffffffff)
+                       orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
+               .endif
+       .endm
+
+       /*
+        * Helper macro to bitwise-clear bits in `_reg_l` and
+        * `_reg_h` given a 64 bit immediate `_val`.  The set bits
+        * in the bottom word of `_val` dictate which bits from
+        * `_reg_l` should be cleared.  Similarly, the set bits in
+        * the top word of `_val` dictate which bits from `_reg_h`
+        * should be cleared.  If either the bottom or top word of
+        * `_val` is zero, the corresponding BIC operation is skipped.
+        */
+       .macro bic64_imm _reg_l, _reg_h, _val
+               .if (\_val >> 32)
+                       bic \_reg_h, \_reg_h, #(\_val >> 32)
+               .endif
+               .if (\_val & 0xffffffff)
+                       bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
+               .endif
+       .endm
+
+#endif /* ASM_MACROS_S */
diff --git a/include/arch/aarch32/assert_macros.S b/include/arch/aarch32/assert_macros.S
new file mode 100644
index 0000000..ab3a2eb
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASSERT_MACROS_S
+#define ASSERT_MACROS_S
+
+       /*
+        * Assembler macro to enable asm_assert. We assume that the stack is
+        * initialized prior to invoking this macro.
+        */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+       .pushsection .rodata.str1.1, "aS" ;\
+       .L_assert_filename: ;\
+                       .string __FILE__ ;\
+       .popsection ;\
+.endif ;\
+       b##_cc  300f ;\
+       ldr     r0, =.L_assert_filename ;\
+       ldr     r1, =__LINE__ ;\
+       b       asm_assert;\
+300:
+
+#endif /* ASSERT_MACROS_S */
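
A typical call site, shown for illustration only, tests a condition and asserts on the resulting condition code; here, asserting that r0 is non-zero:

    cmp     r0, #0
    ASM_ASSERT(ne)
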
diff --git a/include/arch/aarch32/console_macros.S b/include/arch/aarch32/console_macros.S
new file mode 100644
index 0000000..ba6e7d0
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef CONSOLE_MACROS_S
+#define CONSOLE_MACROS_S
+
+#include <console.h>
+
+/*
+ * This macro encapsulates the common setup that has to be done at the end of
+ * a console driver's register function. It will register all of the driver's
+ * callbacks in the console_t structure and initialize the flags field (by
+ * default consoles are enabled for the "boot" and "crash" states; this can be
+ * changed after registration with the console_set_scope() function). It ends
+ * with a tail call to console_register(), so returning from that call returns
+ * to the driver's caller.
+ * REQUIRES a console_t pointer in r0 and a valid return address in lr.
+ */
+/*
+ * The USE_FINISH_CONSOLE_REG_2 guard is introduced to allow selection between
+ * the two variants of the finish_console_register macro and will be removed
+ * once the deprecated variant is removed.
+ */
+#ifndef USE_FINISH_CONSOLE_REG_2
+#if !ERROR_DEPRECATED
+       /* This version of the macro is deprecated. Use the new version */
+       .macro  finish_console_register _driver
+       /*
+        * Add these weak definitions so we will automatically write a 0 if the
+        * function doesn't exist. I'd rather use .ifdef but that only works if
+        * the function was defined (not just declared .global) above this point
+        * in the file, which we can't guarantee.
+        */
+       .weak console_\_driver\()_putc
+       .weak console_\_driver\()_getc
+       .weak console_\_driver\()_flush
+
+       /* Don't use adrp on weak funcs! See GNU ld bugzilla issue 22589. */
+       ldr     r1, =console_\_driver\()_putc
+       str     r1, [r0, #CONSOLE_T_PUTC]
+       ldr     r1, =console_\_driver\()_getc
+       str     r1, [r0, #CONSOLE_T_GETC]
+       ldr     r1, =console_\_driver\()_flush
+       str     r1, [r0, #CONSOLE_T_FLUSH]
+       mov     r1, #(CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH)
+       str     r1, [r0, #CONSOLE_T_FLAGS]
+       b       console_register
+       .endm
+#endif /* ERROR_DEPRECATED */
+#else /* USE_FINISH_CONSOLE_REG_2 */
+       /* The new version of the macro not using weak references */
+       .macro  finish_console_register _driver, putc=0, getc=0, flush=0
+       /*
+	 * If any of the callbacks is not specified or is set to 0, the
+	 * corresponding callback entry in console_t is set to 0.
+        */
+       .ifne \putc
+         ldr   r1, =console_\_driver\()_putc
+       .else
+         mov   r1, #0
+       .endif
+       str     r1, [r0, #CONSOLE_T_PUTC]
+
+       .ifne \getc
+         ldr   r1, =console_\_driver\()_getc
+       .else
+         mov   r1, #0
+       .endif
+       str     r1, [r0, #CONSOLE_T_GETC]
+
+       .ifne \flush
+         ldr   r1, =console_\_driver\()_flush
+       .else
+         mov   r1, #0
+       .endif
+       str     r1, [r0, #CONSOLE_T_FLUSH]
+
+       mov     r1, #(CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH)
+       str     r1, [r0, #CONSOLE_T_FLAGS]
+       b       console_register
+       .endm
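+
+	/*
+	 * Illustrative use (driver name hypothetical): a driver "foo" that
+	 * implements console_foo_putc and console_foo_flush but has no
+	 * getc callback would end its registration function with:
+	 *
+	 *	finish_console_register foo putc=1, getc=0, flush=1
+	 */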
+#endif /* USE_FINISH_CONSOLE_REG_2 */
+#endif /* CONSOLE_MACROS_S */
diff --git a/include/arch/aarch32/el3_common_macros.S b/include/arch/aarch32/el3_common_macros.S
new file mode 100644 (file)
index 0000000..048f161
--- /dev/null
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef EL3_COMMON_MACROS_S
+#define EL3_COMMON_MACROS_S
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+       /*
+        * Helper macro to initialise EL3 registers we care about.
+        */
+       .macro el3_arch_init_common
+       /* ---------------------------------------------------------------------
+        * SCTLR has already been initialised - read current value before
+        * modifying.
+        *
+        * SCTLR.I: Enable the instruction cache.
+        *
+        * SCTLR.A: Enable Alignment fault checking. All instructions that load
+        *  or store one or more registers have an alignment check that the
+        *  address being accessed is aligned to the size of the data element(s)
+        *  being accessed.
+        * ---------------------------------------------------------------------
+        */
+       ldr     r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
+       ldcopr  r0, SCTLR
+       orr     r0, r0, r1
+       stcopr  r0, SCTLR
+       isb
+
+       /* ---------------------------------------------------------------------
+        * Initialise SCR, setting all fields rather than relying on the hw.
+        *
+        * SCR.SIF: Enabled so that Secure state instruction fetches from
+        *  Non-secure memory are not permitted.
+        * ---------------------------------------------------------------------
+        */
+       ldr     r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
+       stcopr  r0, SCR
+
+       /* -----------------------------------------------------
+	 * Enable asynchronous data aborts now that the
+	 * exception vectors have been set up.
+        * -----------------------------------------------------
+        */
+       cpsie   a
+       isb
+
+       /* ---------------------------------------------------------------------
+        * Initialise NSACR, setting all the fields, except for the
+        * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
+        * fields are architecturally UNKNOWN on reset.
+        *
+        * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
+	 *  cp11 field is ignored, but is set to the same value as cp10. The cp10
+        *  field is set to allow access to Advanced SIMD and floating point
+        *  features from both Security states.
+        * ---------------------------------------------------------------------
+        */
+       ldcopr  r0, NSACR
+       and     r0, r0, #NSACR_IMP_DEF_MASK
+       orr     r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
+       stcopr  r0, NSACR
+       isb
+
+       /* ---------------------------------------------------------------------
+        * Initialise CPACR, setting all fields rather than relying on hw. Some
+        * fields are architecturally UNKNOWN on reset.
+        *
+        * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
+        *  to trace registers. Set to zero to allow access.
+        *
+        * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
+	 *  cp11 field is ignored, but is set to the same value as cp10. The cp10
+        *  field is set to allow full access from PL0 and PL1 to floating-point
+        *  and Advanced SIMD features.
+        * ---------------------------------------------------------------------
+        */
+       ldr     r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
+       stcopr  r0, CPACR
+       isb
+
+       /* ---------------------------------------------------------------------
+        * Initialise FPEXC, setting all fields rather than relying on hw. Some
+        * fields are architecturally UNKNOWN on reset and are set to zero
+        * except for field(s) listed below.
+        *
+        * FPEXC.EN: Enable access to Advanced SIMD and floating point features
+        *  from all exception levels.
+        * ---------------------------------------------------------------------
+        */
+       ldr     r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
+       vmsr    FPEXC, r0
+       isb
+
+#if (ARM_ARCH_MAJOR > 7)
+       /* ---------------------------------------------------------------------
+        * Initialise SDCR, setting all the fields rather than relying on hw.
+        *
+        * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
+        * Secure EL1 are disabled.
+        * ---------------------------------------------------------------------
+        */
+       ldr     r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE))
+       stcopr  r0, SDCR
+#endif
+
+       /*
+        * If Data Independent Timing (DIT) functionality is implemented,
+        * always enable DIT in EL3
+        */
+       ldcopr  r0, ID_PFR0
+       and     r0, r0, #(ID_PFR0_DIT_MASK << ID_PFR0_DIT_SHIFT)
+       cmp     r0, #ID_PFR0_DIT_SUPPORTED
+       bne     1f
+       mrs     r0, cpsr
+       orr     r0, r0, #CPSR_DIT_BIT
+       msr     cpsr_cxsf, r0
+1:
+       .endm
+
+/* -----------------------------------------------------------------------------
+ * This is the superset of actions that need to be performed during a cold boot
+ * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows an action to be
+ * enabled or disabled.
+ *
+ *  _init_sctlr:
+ *     Whether the macro needs to initialise the SCTLR register including
+ *     configuring the endianness of data accesses.
+ *
+ *  _warm_boot_mailbox:
+ *     Whether the macro needs to detect the type of boot (cold/warm). The
+ *	detection is based on the platform entrypoint address: if it is zero
+ *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ *	this macro jumps to the platform entrypoint address.
+ *
+ *  _secondary_cold_boot:
+ *     Whether the macro needs to identify the CPU that is calling it: primary
+ *     CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ *     the platform initialisations, while the secondaries will be put in a
+ *     platform-specific state in the meantime.
+ *
+ *     If the caller knows this macro will only be called by the primary CPU
+ *     then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ *     Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ *     Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ *	Address of the exception vectors to program in the VBAR and MVBAR
+ *	registers.
+ * -----------------------------------------------------------------------------
+ */
+       .macro el3_entrypoint_common                                    \
+               _init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,  \
+               _init_memory, _init_c_runtime, _exception_vectors
+
+       /* Make sure we are in Secure Mode */
+#if ENABLE_ASSERTIONS
+       ldcopr  r0, SCR
+       tst     r0, #SCR_NS_BIT
+       ASM_ASSERT(eq)
+#endif
+
+       .if \_init_sctlr
+               /* -------------------------------------------------------------
+                * This is the initialisation of SCTLR and so must ensure that
+                * all fields are explicitly set rather than relying on hw. Some
+                * fields reset to an IMPLEMENTATION DEFINED value.
+                *
+                * SCTLR.TE: Set to zero so that exceptions to an Exception
+                *  Level executing at PL1 are taken to A32 state.
+                *
+                * SCTLR.EE: Set the CPU endianness before doing anything that
+                *  might involve memory reads or writes. Set to zero to select
+                *  Little Endian.
+                *
+                * SCTLR.V: Set to zero to select the normal exception vectors
+                *  with base address held in VBAR.
+                *
+                * SCTLR.DSSBS: Set to zero to disable speculation store bypass
+                *  safe behaviour upon exception entry to EL3.
+                * -------------------------------------------------------------
+                */
+               ldr     r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
+                               SCTLR_V_BIT | SCTLR_DSSBS_BIT))
+               stcopr  r0, SCTLR
+               isb
+       .endif /* _init_sctlr */
+
+       /* Switch to monitor mode */
+       cps     #MODE32_mon
+       isb
+
+       .if \_warm_boot_mailbox
+               /* -------------------------------------------------------------
+                * This code will be executed for both warm and cold resets.
+                * Now is the time to distinguish between the two.
+		 * Query the platform entrypoint address; if it is not zero,
+		 * this is a warm boot, so jump to that address.
+                * -------------------------------------------------------------
+                */
+               bl      plat_get_my_entrypoint
+               cmp     r0, #0
+               bxne    r0
+       .endif /* _warm_boot_mailbox */
+
+       /* ---------------------------------------------------------------------
+        * Set the exception vectors (VBAR/MVBAR).
+        * ---------------------------------------------------------------------
+        */
+       ldr     r0, =\_exception_vectors
+       stcopr  r0, VBAR
+       stcopr  r0, MVBAR
+       isb
+
+       /* ---------------------------------------------------------------------
+        * It is a cold boot.
+	 * Perform any processor-specific actions upon reset, e.g. cache and
+	 * TLB invalidations.
+        * ---------------------------------------------------------------------
+        */
+       bl      reset_handler
+
+       el3_arch_init_common
+
+       .if \_secondary_cold_boot
+               /* -------------------------------------------------------------
+                * Check if this is a primary or secondary CPU cold boot.
+                * The primary CPU will set up the platform while the
+                * secondaries are placed in a platform-specific state until the
+                * primary CPU performs the necessary actions to bring them out
+                * of that state and allows entry into the OS.
+                * -------------------------------------------------------------
+                */
+               bl      plat_is_my_cpu_primary
+               cmp     r0, #0
+               bne     do_primary_cold_boot
+
+               /* This is a cold boot on a secondary CPU */
+               bl      plat_secondary_cold_boot_setup
+               /* plat_secondary_cold_boot_setup() is not supposed to return */
+               no_ret  plat_panic_handler
+
+       do_primary_cold_boot:
+       .endif /* _secondary_cold_boot */
+
+       /* ---------------------------------------------------------------------
+	 * Initialise memory now. The secondary CPU cold boot path does not
+	 * reach this point.
+        * ---------------------------------------------------------------------
+        */
+
+       .if \_init_memory
+               bl      platform_mem_init
+       .endif /* _init_memory */
+
+       /* ---------------------------------------------------------------------
+        * Init C runtime environment:
+	 *   - Zero-initialise the NOBITS sections. There are two of them:
+        *       - the .bss section;
+        *       - the coherent memory section (if any).
+        *   - Relocate the data section from ROM to RAM, if required.
+        * ---------------------------------------------------------------------
+        */
+       .if \_init_c_runtime
+#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
+               /* -----------------------------------------------------------------
+                * Invalidate the RW memory used by the image. This
+                * includes the data and NOBITS sections. This is done to
+                * safeguard against possible corruption of this memory by
+                * dirty cache lines in a system cache as a result of use by
+                * an earlier boot loader stage.
+                * -----------------------------------------------------------------
+                */
+               ldr     r0, =__RW_START__
+               ldr     r1, =__RW_END__
+               sub     r1, r1, r0
+               bl      inv_dcache_range
+#endif
+
+               ldr     r0, =__BSS_START__
+               ldr     r1, =__BSS_SIZE__
+               bl      zeromem
+
+#if USE_COHERENT_MEM
+               ldr     r0, =__COHERENT_RAM_START__
+               ldr     r1, =__COHERENT_RAM_UNALIGNED_SIZE__
+               bl      zeromem
+#endif
+
+#ifdef IMAGE_BL1
+               /* -----------------------------------------------------
+                * Copy data from ROM to RAM.
+                * -----------------------------------------------------
+                */
+               ldr     r0, =__DATA_RAM_START__
+               ldr     r1, =__DATA_ROM_START__
+               ldr     r2, =__DATA_SIZE__
+               bl      memcpy4
+#endif
+       .endif /* _init_c_runtime */
+
+       /* ---------------------------------------------------------------------
+        * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+        * the MMU is enabled. There is no risk of reading stale stack memory
+        * after enabling the MMU as only the primary CPU is running at the
+        * moment.
+        * ---------------------------------------------------------------------
+        */
+       bl      plat_set_my_stack
+
+#if STACK_PROTECTOR_ENABLED
+       .if \_init_c_runtime
+       bl      update_stack_protector_canary
+       .endif /* _init_c_runtime */
+#endif
+       .endm
+
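+/*
+ * Illustrative invocation (parameter values and vector table name
+ * hypothetical): a BL32 (SP_MIN) entrypoint that must handle warm boots
+ * and secondary CPUs could use:
+ *
+ *	el3_entrypoint_common					\
+ *		_init_sctlr=1					\
+ *		_warm_boot_mailbox=1				\
+ *		_secondary_cold_boot=1				\
+ *		_init_memory=1					\
+ *		_init_c_runtime=1				\
+ *		_exception_vectors=sp_min_vector_table
+ */
+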
+#endif /* EL3_COMMON_MACROS_S */
diff --git a/include/arch/aarch32/smccc_helpers.h b/include/arch/aarch32/smccc_helpers.h
new file mode 100644 (file)
index 0000000..67952ec
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SMCCC_HELPERS_H
+#define SMCCC_HELPERS_H
+
+#include <smccc.h>
+
+/* These are offsets to registers in smc_ctx_t */
+#define SMC_CTX_GPREG_R0       U(0x0)
+#define SMC_CTX_GPREG_R1       U(0x4)
+#define SMC_CTX_GPREG_R2       U(0x8)
+#define SMC_CTX_GPREG_R3       U(0xC)
+#define SMC_CTX_GPREG_R4       U(0x10)
+#define SMC_CTX_GPREG_R5       U(0x14)
+#define SMC_CTX_SP_USR         U(0x34)
+#define SMC_CTX_SPSR_MON       U(0x78)
+#define SMC_CTX_SP_MON         U(0x7C)
+#define SMC_CTX_LR_MON         U(0x80)
+#define SMC_CTX_SCR            U(0x84)
+#define SMC_CTX_PMCR           U(0x88)
+#define SMC_CTX_SIZE           U(0x90)
+
+#ifndef __ASSEMBLY__
+#include <cassert.h>
+#include <stdint.h>
+
+/*
+ * The generic structure to save the arguments and callee-saved registers
+ * during an SMC. This structure is also used to store the return values
+ * after completion of the SMC service.
+ */
+typedef struct smc_ctx {
+       u_register_t r0;
+       u_register_t r1;
+       u_register_t r2;
+       u_register_t r3;
+       u_register_t r4;
+       u_register_t r5;
+       u_register_t r6;
+       u_register_t r7;
+       u_register_t r8;
+       u_register_t r9;
+       u_register_t r10;
+       u_register_t r11;
+       u_register_t r12;
+       /* spsr_usr doesn't exist */
+       u_register_t sp_usr;
+       u_register_t lr_usr;
+       u_register_t spsr_irq;
+       u_register_t sp_irq;
+       u_register_t lr_irq;
+       u_register_t spsr_fiq;
+       u_register_t sp_fiq;
+       u_register_t lr_fiq;
+       u_register_t spsr_svc;
+       u_register_t sp_svc;
+       u_register_t lr_svc;
+       u_register_t spsr_abt;
+       u_register_t sp_abt;
+       u_register_t lr_abt;
+       u_register_t spsr_und;
+       u_register_t sp_und;
+       u_register_t lr_und;
+       u_register_t spsr_mon;
+       /*
+	 * `sp_mon` points to the C runtime stack in monitor mode. Prior to
+	 * exit from the SMC, it is made to point to this `smc_ctx_t` so that
+	 * the context can be easily accessed on the next SMC entry.
+        */
+       u_register_t sp_mon;
+       u_register_t lr_mon;
+       u_register_t scr;
+       u_register_t pmcr;
+       /*
+        * The workaround for CVE-2017-5715 requires storing information in
+        * the bottom 3 bits of the stack pointer.  Add a padding field to
+        * force the size of the struct to be a multiple of 8.
+        */
+       u_register_t pad;
+} smc_ctx_t __aligned(8);
+
+/*
+ * Compile-time assertions related to the 'smc_context' structure to
+ * ensure that the assembler's and the compiler's views of the offsets of
+ * the structure members are the same.
+ */
+CASSERT(SMC_CTX_GPREG_R0 == __builtin_offsetof(smc_ctx_t, r0), \
+       assert_smc_ctx_greg_r0_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R1 == __builtin_offsetof(smc_ctx_t, r1), \
+       assert_smc_ctx_greg_r1_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R2 == __builtin_offsetof(smc_ctx_t, r2), \
+       assert_smc_ctx_greg_r2_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R3 == __builtin_offsetof(smc_ctx_t, r3), \
+       assert_smc_ctx_greg_r3_offset_mismatch);
+CASSERT(SMC_CTX_GPREG_R4 == __builtin_offsetof(smc_ctx_t, r4), \
+       assert_smc_ctx_greg_r4_offset_mismatch);
+CASSERT(SMC_CTX_SP_USR == __builtin_offsetof(smc_ctx_t, sp_usr), \
+       assert_smc_ctx_sp_usr_offset_mismatch);
+CASSERT(SMC_CTX_LR_MON == __builtin_offsetof(smc_ctx_t, lr_mon), \
+       assert_smc_ctx_lr_mon_offset_mismatch);
+CASSERT(SMC_CTX_SPSR_MON == __builtin_offsetof(smc_ctx_t, spsr_mon), \
+       assert_smc_ctx_spsr_mon_offset_mismatch);
+
+CASSERT((sizeof(smc_ctx_t) & 0x7U) == 0U, assert_smc_ctx_not_aligned);
+CASSERT(SMC_CTX_SIZE == sizeof(smc_ctx_t), assert_smc_ctx_size_mismatch);
+
+/* Convenience macros to return from SMC handler */
+#define SMC_RET0(_h) {                         \
+       return (uintptr_t)(_h);                 \
+}
+#define SMC_RET1(_h, _r0) {                    \
+       ((smc_ctx_t *)(_h))->r0 = (_r0);        \
+       SMC_RET0(_h);                           \
+}
+#define SMC_RET2(_h, _r0, _r1) {               \
+       ((smc_ctx_t *)(_h))->r1 = (_r1);        \
+       SMC_RET1(_h, (_r0));                    \
+}
+#define SMC_RET3(_h, _r0, _r1, _r2) {          \
+       ((smc_ctx_t *)(_h))->r2 = (_r2);        \
+       SMC_RET2(_h, (_r0), (_r1));             \
+}
+#define SMC_RET4(_h, _r0, _r1, _r2, _r3) {     \
+       ((smc_ctx_t *)(_h))->r3 = (_r3);        \
+       SMC_RET3(_h, (_r0), (_r1), (_r2));      \
+}
+
+/*
+ * Helper macro to retrieve the SMC parameters from smc_ctx_t.
+ */
+#define get_smc_params_from_ctx(_hdl, _r1, _r2, _r3, _r4) {    \
+               _r1 = ((smc_ctx_t *)_hdl)->r1;          \
+               _r2 = ((smc_ctx_t *)_hdl)->r2;          \
+               _r3 = ((smc_ctx_t *)_hdl)->r3;          \
+               _r4 = ((smc_ctx_t *)_hdl)->r4;          \
+               }
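+
+/*
+ * Illustrative handler (hypothetical, a sketch only): unpack the SMC
+ * arguments and return a single result value of 0.
+ *
+ *	static uintptr_t foo_smc_handler(void *handle)
+ *	{
+ *		u_register_t arg1, arg2, arg3, arg4;
+ *
+ *		get_smc_params_from_ctx(handle, arg1, arg2, arg3, arg4);
+ *		... service the request ...
+ *		SMC_RET1(handle, 0);
+ *	}
+ */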
+
+/* ------------------------------------------------------------------------
+ * Helper APIs for setting and retrieving the appropriate `smc_ctx_t`.
+ * These functions need to be implemented by the BL image including this
+ * library.
+ * ------------------------------------------------------------------------
+ */
+
+/* Get the pointer to `smc_ctx_t` corresponding to the security state. */
+void *smc_get_ctx(unsigned int security_state);
+
+/* Set the next `smc_ctx_t` corresponding to the security state. */
+void smc_set_next_ctx(unsigned int security_state);
+
+/* Get the pointer to next `smc_ctx_t` already set by `smc_set_next_ctx()`. */
+void *smc_get_next_ctx(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* SMCCC_HELPERS_H */
diff --git a/include/arch/aarch32/smccc_macros.S b/include/arch/aarch32/smccc_macros.S
new file mode 100644 (file)
index 0000000..1fe6c64
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef SMCCC_MACROS_S
+#define SMCCC_MACROS_S
+
+#include <arch.h>
+
+/*
+ * Macro to save the general-purpose registers (r0 - r12), the banked
+ * spsr, lr and sp registers, and the `scr` register to the SMC context on
+ * entry due to an SMC call. The `lr` of the current mode (monitor) is
+ * expected to be saved already. The `sp` must point to the `smc_ctx_t` to
+ * save to. The `pmcr` register is also saved, as it is updated whilst
+ * executing in the secure world.
+ */
+       .macro smccc_save_gp_mode_regs
+       /* Save r0 - r12 in the SMC context */
+       stm     sp, {r0-r12}
+       mov     r0, sp
+       add     r0, r0, #SMC_CTX_SP_USR
+
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
+       /* Must be in secure state to restore Monitor mode */
+       ldcopr  r4, SCR
+       bic     r2, r4, #SCR_NS_BIT
+       stcopr  r2, SCR
+       isb
+
+       cps     #MODE32_sys
+       stm     r0!, {sp, lr}
+
+       cps     #MODE32_irq
+       mrs     r2, spsr
+       stm     r0!, {r2, sp, lr}
+
+       cps     #MODE32_fiq
+       mrs     r2, spsr
+       stm     r0!, {r2, sp, lr}
+
+       cps     #MODE32_svc
+       mrs     r2, spsr
+       stm     r0!, {r2, sp, lr}
+
+       cps     #MODE32_abt
+       mrs     r2, spsr
+       stm     r0!, {r2, sp, lr}
+
+       cps     #MODE32_und
+       mrs     r2, spsr
+       stm     r0!, {r2, sp, lr}
+
+       /* lr_mon is already saved by caller */
+       cps     #MODE32_mon
+       mrs     r2, spsr
+       stm     r0!, {r2}
+
+       stcopr  r4, SCR
+       isb
+#else
+       /* Save the banked registers including the current SPSR and LR */
+       mrs     r4, sp_usr
+       mrs     r5, lr_usr
+       mrs     r6, spsr_irq
+       mrs     r7, sp_irq
+       mrs     r8, lr_irq
+       mrs     r9, spsr_fiq
+       mrs     r10, sp_fiq
+       mrs     r11, lr_fiq
+       mrs     r12, spsr_svc
+       stm     r0!, {r4-r12}
+
+       mrs     r4, sp_svc
+       mrs     r5, lr_svc
+       mrs     r6, spsr_abt
+       mrs     r7, sp_abt
+       mrs     r8, lr_abt
+       mrs     r9, spsr_und
+       mrs     r10, sp_und
+       mrs     r11, lr_und
+       mrs     r12, spsr
+       stm     r0!, {r4-r12}
+       /* lr_mon is already saved by caller */
+
+       ldcopr  r4, SCR
+#endif
+       str     r4, [sp, #SMC_CTX_SCR]
+       ldcopr  r4, PMCR
+       str     r4, [sp, #SMC_CTX_PMCR]
+       .endm
+
+/*
+ * Macro to restore the `smc_ctx_t`, which includes the general-purpose
+ * registers and banked mode registers, and exit from monitor mode.
+ * r0 must point to the `smc_ctx_t` to restore from.
+ */
+       .macro monitor_exit
+       /*
+	 * Save the current sp into the context and make sp point to the
+	 * SMC context, which will be used for handling the next SMC.
+        */
+       str     sp, [r0, #SMC_CTX_SP_MON]
+       mov     sp, r0
+
+       /*
+        * Restore SCR first so that we access the right banked register
+        * when the other mode registers are restored.
+        */
+       ldr     r1, [r0, #SMC_CTX_SCR]
+       stcopr  r1, SCR
+       isb
+
+       /*
+        * Restore the PMCR register.
+        */
+       ldr     r1, [r0, #SMC_CTX_PMCR]
+       stcopr  r1, PMCR
+
+       /* Restore the banked registers including the current SPSR */
+       add     r1, r0, #SMC_CTX_SP_USR
+
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
+       /* Must be in secure state to restore Monitor mode */
+       ldcopr  r4, SCR
+       bic     r2, r4, #SCR_NS_BIT
+       stcopr  r2, SCR
+       isb
+
+       cps     #MODE32_sys
+       ldm     r1!, {sp, lr}
+
+       cps     #MODE32_irq
+       ldm     r1!, {r2, sp, lr}
+       msr     spsr_fsxc, r2
+
+       cps     #MODE32_fiq
+       ldm     r1!, {r2, sp, lr}
+       msr     spsr_fsxc, r2
+
+       cps     #MODE32_svc
+       ldm     r1!, {r2, sp, lr}
+       msr     spsr_fsxc, r2
+
+       cps     #MODE32_abt
+       ldm     r1!, {r2, sp, lr}
+       msr     spsr_fsxc, r2
+
+       cps     #MODE32_und
+       ldm     r1!, {r2, sp, lr}
+       msr     spsr_fsxc, r2
+
+       cps     #MODE32_mon
+       ldm     r1!, {r2}
+       msr     spsr_fsxc, r2
+
+       stcopr  r4, SCR
+       isb
+#else
+       ldm     r1!, {r4-r12}
+       msr     sp_usr, r4
+       msr     lr_usr, r5
+       msr     spsr_irq, r6
+       msr     sp_irq, r7
+       msr     lr_irq, r8
+       msr     spsr_fiq, r9
+       msr     sp_fiq, r10
+       msr     lr_fiq, r11
+       msr     spsr_svc, r12
+
+       ldm     r1!, {r4-r12}
+       msr     sp_svc, r4
+       msr     lr_svc, r5
+       msr     spsr_abt, r6
+       msr     sp_abt, r7
+       msr     lr_abt, r8
+       msr     spsr_und, r9
+       msr     sp_und, r10
+       msr     lr_und, r11
+       /*
+        * Use the `_fsxc` suffix explicitly to instruct the assembler
+	 * to update all 32 bits of the SPSR. Otherwise the assembler
+	 * defaults to the `_fc` suffix, which only modifies the
+	 * f ([31:24]) and c ([7:0]) bits of the SPSR.
+        */
+       msr     spsr_fsxc, r12
+#endif
+
+       /* Restore the LR */
+       ldr     lr, [r0, #SMC_CTX_LR_MON]
+
+       /* Restore the rest of the general purpose registers */
+       ldm     r0, {r0-r12}
+       eret
+       .endm
+
+#endif /* SMCCC_MACROS_S */
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
new file mode 100644 (file)
index 0000000..72a14dc
--- /dev/null
@@ -0,0 +1,823 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_H
+#define ARCH_H
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * MIDR bit definitions
+ ******************************************************************************/
+#define MIDR_IMPL_MASK         U(0xff)
+#define MIDR_IMPL_SHIFT                U(0x18)
+#define MIDR_VAR_SHIFT         U(20)
+#define MIDR_VAR_BITS          U(4)
+#define MIDR_VAR_MASK          U(0xf)
+#define MIDR_REV_SHIFT         U(0)
+#define MIDR_REV_BITS          U(4)
+#define MIDR_REV_MASK          U(0xf)
+#define MIDR_PN_MASK           U(0xfff)
+#define MIDR_PN_SHIFT          U(0x4)
+
+/*******************************************************************************
+ * MPIDR macros
+ ******************************************************************************/
+#define MPIDR_MT_MASK          (ULL(1) << 24)
+#define MPIDR_CPU_MASK         MPIDR_AFFLVL_MASK
+#define MPIDR_CLUSTER_MASK     (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
+#define MPIDR_AFFINITY_BITS    U(8)
+#define MPIDR_AFFLVL_MASK      ULL(0xff)
+#define MPIDR_AFF0_SHIFT       U(0)
+#define MPIDR_AFF1_SHIFT       U(8)
+#define MPIDR_AFF2_SHIFT       U(16)
+#define MPIDR_AFF3_SHIFT       U(32)
+#define MPIDR_AFF_SHIFT(_n)    MPIDR_AFF##_n##_SHIFT
+#define MPIDR_AFFINITY_MASK    ULL(0xff00ffffff)
+#define MPIDR_AFFLVL_SHIFT     U(3)
+#define MPIDR_AFFLVL0          ULL(0x0)
+#define MPIDR_AFFLVL1          ULL(0x1)
+#define MPIDR_AFFLVL2          ULL(0x2)
+#define MPIDR_AFFLVL3          ULL(0x3)
+#define MPIDR_AFFLVL(_n)       MPIDR_AFFLVL##_n
+#define MPIDR_AFFLVL0_VAL(mpidr) \
+               (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL1_VAL(mpidr) \
+               (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL2_VAL(mpidr) \
+               (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
+#define MPIDR_AFFLVL3_VAL(mpidr) \
+               (((mpidr) >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK)
+/*
+ * The MPIDR_MAX_AFFLVL count starts from 0; take care to add one when
+ * using this macro to define array sizes.
+ * TODO: only the first 3 affinity levels are supported for now.
+ */
+#define MPIDR_MAX_AFFLVL       U(2)
+
+#define MPID_MASK              (MPIDR_MT_MASK                           | \
+                                (MPIDR_AFFLVL_MASK << MPIDR_AFF3_SHIFT) | \
+                                (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT) | \
+                                (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT) | \
+                                (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
+
+#define MPIDR_AFF_ID(mpid, n)                                  \
+       (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
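+
+/*
+ * Illustrative example: for mpidr = 0x81000102 (Aff2 = 0, Aff1 = 1,
+ * Aff0 = 2), MPIDR_AFFLVL1_VAL(mpidr) yields the cluster number (1) and
+ * MPIDR_AFFLVL0_VAL(mpidr) the CPU number within that cluster (2).
+ */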
+
+/*
+ * An invalid MPID. This value can be used by functions that return an MPID to
+ * indicate an error.
+ */
+#define INVALID_MPID           U(0xFFFFFFFF)
+
+/*******************************************************************************
+ * Definitions for CPU system register interface to GICv3
+ ******************************************************************************/
+#define ICC_IGRPEN1_EL1                S3_0_C12_C12_7
+#define ICC_SGI1R              S3_0_C12_C11_5
+#define ICC_SRE_EL1            S3_0_C12_C12_5
+#define ICC_SRE_EL2            S3_4_C12_C9_5
+#define ICC_SRE_EL3            S3_6_C12_C12_5
+#define ICC_CTLR_EL1           S3_0_C12_C12_4
+#define ICC_CTLR_EL3           S3_6_C12_C12_4
+#define ICC_PMR_EL1            S3_0_C4_C6_0
+#define ICC_RPR_EL1            S3_0_C12_C11_3
+#define ICC_IGRPEN1_EL3                S3_6_c12_c12_7
+#define ICC_IGRPEN0_EL1                S3_0_c12_c12_6
+#define ICC_HPPIR0_EL1         S3_0_c12_c8_2
+#define ICC_HPPIR1_EL1         S3_0_c12_c12_2
+#define ICC_IAR0_EL1           S3_0_c12_c8_0
+#define ICC_IAR1_EL1           S3_0_c12_c12_0
+#define ICC_EOIR0_EL1          S3_0_c12_c8_1
+#define ICC_EOIR1_EL1          S3_0_c12_c12_1
+#define ICC_SGI0R_EL1          S3_0_c12_c11_7
+
+/*******************************************************************************
+ * Generic timer memory mapped registers & offsets
+ ******************************************************************************/
+#define CNTCR_OFF                      U(0x000)
+#define CNTFID_OFF                     U(0x020)
+
+#define CNTCR_EN                       (U(1) << 0)
+#define CNTCR_HDBG                     (U(1) << 1)
+#define CNTCR_FCREQ(x)                 ((x) << 8)
+
+/*******************************************************************************
+ * System register bit definitions
+ ******************************************************************************/
+/* CLIDR definitions */
+#define LOUIS_SHIFT            U(21)
+#define LOC_SHIFT              U(24)
+#define CLIDR_FIELD_WIDTH      U(3)
+
+/* CSSELR definitions */
+#define LEVEL_SHIFT            U(1)
+
+/* Data cache set/way op type defines */
+#define DCISW                  U(0x0)
+#define DCCISW                 U(0x1)
+#define DCCSW                  U(0x2)
+
+/* ID_AA64PFR0_EL1 definitions */
+#define ID_AA64PFR0_EL0_SHIFT  U(0)
+#define ID_AA64PFR0_EL1_SHIFT  U(4)
+#define ID_AA64PFR0_EL2_SHIFT  U(8)
+#define ID_AA64PFR0_EL3_SHIFT  U(12)
+#define ID_AA64PFR0_AMU_SHIFT  U(44)
+#define ID_AA64PFR0_AMU_LENGTH U(4)
+#define ID_AA64PFR0_AMU_MASK   ULL(0xf)
+#define ID_AA64PFR0_ELX_MASK   ULL(0xf)
+#define ID_AA64PFR0_SVE_SHIFT  U(32)
+#define ID_AA64PFR0_SVE_MASK   ULL(0xf)
+#define ID_AA64PFR0_SVE_LENGTH U(4)
+#define ID_AA64PFR0_MPAM_SHIFT U(40)
+#define ID_AA64PFR0_MPAM_MASK  ULL(0xf)
+#define ID_AA64PFR0_DIT_SHIFT  U(48)
+#define ID_AA64PFR0_DIT_MASK   ULL(0xf)
+#define ID_AA64PFR0_DIT_LENGTH U(4)
+#define ID_AA64PFR0_DIT_SUPPORTED      U(1)
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK  ULL(0xf)
+#define ID_AA64PFR0_CSV2_LENGTH        U(4)
+
+/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
+#define ID_AA64DFR0_PMS_SHIFT  U(32)
+#define ID_AA64DFR0_PMS_LENGTH U(4)
+#define ID_AA64DFR0_PMS_MASK   ULL(0xf)
+
+#define EL_IMPL_NONE           ULL(0)
+#define EL_IMPL_A64ONLY                ULL(1)
+#define EL_IMPL_A64_A32                ULL(2)
+
+#define ID_AA64PFR0_GIC_SHIFT  U(24)
+#define ID_AA64PFR0_GIC_WIDTH  U(4)
+#define ID_AA64PFR0_GIC_MASK   ((ULL(1) << ID_AA64PFR0_GIC_WIDTH) - ULL(1))
+
+/* ID_AA64MMFR0_EL1 definitions */
+#define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
+#define ID_AA64MMFR0_EL1_PARANGE_MASK  ULL(0xf)
+
+/* ID_AA64ISAR1_EL1 definitions */
+#define ID_AA64ISAR1_GPI_SHIFT U(28)
+#define ID_AA64ISAR1_GPI_WIDTH U(4)
+#define ID_AA64ISAR1_GPA_SHIFT U(24)
+#define ID_AA64ISAR1_GPA_WIDTH U(4)
+#define ID_AA64ISAR1_API_SHIFT U(8)
+#define ID_AA64ISAR1_API_WIDTH U(4)
+#define ID_AA64ISAR1_APA_SHIFT U(4)
+#define ID_AA64ISAR1_APA_WIDTH U(4)
+
+#define ID_AA64ISAR1_GPI_MASK \
+       (((ULL(1) << ID_AA64ISAR1_GPI_WIDTH) - ULL(1)) << ID_AA64ISAR1_GPI_SHIFT)
+#define ID_AA64ISAR1_GPA_MASK \
+       (((ULL(1) << ID_AA64ISAR1_GPA_WIDTH) - ULL(1)) << ID_AA64ISAR1_GPA_SHIFT)
+#define ID_AA64ISAR1_API_MASK \
+       (((ULL(1) << ID_AA64ISAR1_API_WIDTH) - ULL(1)) << ID_AA64ISAR1_API_SHIFT)
+#define ID_AA64ISAR1_APA_MASK \
+       (((ULL(1) << ID_AA64ISAR1_APA_WIDTH) - ULL(1)) << ID_AA64ISAR1_APA_SHIFT)
+
+#define PARANGE_0000   U(32)
+#define PARANGE_0001   U(36)
+#define PARANGE_0010   U(40)
+#define PARANGE_0011   U(42)
+#define PARANGE_0100   U(44)
+#define PARANGE_0101   U(48)
+#define PARANGE_0110   U(52)
+
+#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT          U(28)
+#define ID_AA64MMFR0_EL1_TGRAN4_MASK           ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED      ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED  ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT         U(24)
+#define ID_AA64MMFR0_EL1_TGRAN64_MASK          ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED     ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN64_NOT_SUPPORTED ULL(0xf)
+
+#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT         U(20)
+#define ID_AA64MMFR0_EL1_TGRAN16_MASK          ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED     ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED ULL(0x0)
+
+/* ID_AA64PFR1_EL1 definitions */
+#define ID_AA64PFR1_EL1_SSBS_SHIFT     U(4)
+#define ID_AA64PFR1_EL1_SSBS_MASK      ULL(0xf)
+
+#define SSBS_UNAVAILABLE       ULL(0)  /* No architectural SSBS support */
+
+/* ID_PFR1_EL1 definitions */
+#define ID_PFR1_VIRTEXT_SHIFT  U(12)
+#define ID_PFR1_VIRTEXT_MASK   U(0xf)
+#define GET_VIRT_EXT(id)       (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
+                                & ID_PFR1_VIRTEXT_MASK)
+
+/* SCTLR definitions */
+#define SCTLR_EL2_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+                        (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+                        (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_EL1_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+                        (U(1) << 22) | (U(1) << 20) | (U(1) << 11))
+#define SCTLR_AARCH32_EL1_RES1 \
+                       ((U(1) << 23) | (U(1) << 22) | (U(1) << 11) | \
+                        (U(1) << 4) | (U(1) << 3))
+
+#define SCTLR_EL3_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
+                       (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
+                       (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
+
+#define SCTLR_M_BIT            (ULL(1) << 0)
+#define SCTLR_A_BIT            (ULL(1) << 1)
+#define SCTLR_C_BIT            (ULL(1) << 2)
+#define SCTLR_SA_BIT           (ULL(1) << 3)
+#define SCTLR_SA0_BIT          (ULL(1) << 4)
+#define SCTLR_CP15BEN_BIT      (ULL(1) << 5)
+#define SCTLR_ITD_BIT          (ULL(1) << 7)
+#define SCTLR_SED_BIT          (ULL(1) << 8)
+#define SCTLR_UMA_BIT          (ULL(1) << 9)
+#define SCTLR_I_BIT            (ULL(1) << 12)
+#define SCTLR_V_BIT            (ULL(1) << 13)
+#define SCTLR_DZE_BIT          (ULL(1) << 14)
+#define SCTLR_UCT_BIT          (ULL(1) << 15)
+#define SCTLR_NTWI_BIT         (ULL(1) << 16)
+#define SCTLR_NTWE_BIT         (ULL(1) << 18)
+#define SCTLR_WXN_BIT          (ULL(1) << 19)
+#define SCTLR_UWXN_BIT         (ULL(1) << 20)
+#define SCTLR_E0E_BIT          (ULL(1) << 24)
+#define SCTLR_EE_BIT           (ULL(1) << 25)
+#define SCTLR_UCI_BIT          (ULL(1) << 26)
+#define SCTLR_TRE_BIT          (ULL(1) << 28)
+#define SCTLR_AFE_BIT          (ULL(1) << 29)
+#define SCTLR_TE_BIT           (ULL(1) << 30)
+#define SCTLR_DSSBS_BIT                (ULL(1) << 44)
+#define SCTLR_RESET_VAL                SCTLR_EL3_RES1
+
+/* CPACR_El1 definitions */
+#define CPACR_EL1_FPEN(x)      ((x) << 20)
+#define CPACR_EL1_FP_TRAP_EL0  U(0x1)
+#define CPACR_EL1_FP_TRAP_ALL  U(0x2)
+#define CPACR_EL1_FP_TRAP_NONE U(0x3)
+
+/* SCR definitions */
+#define SCR_RES1_BITS          ((U(1) << 4) | (U(1) << 5))
+#define SCR_FIEN_BIT           (U(1) << 21)
+#define SCR_API_BIT            (U(1) << 17)
+#define SCR_APK_BIT            (U(1) << 16)
+#define SCR_TWE_BIT            (U(1) << 13)
+#define SCR_TWI_BIT            (U(1) << 12)
+#define SCR_ST_BIT             (U(1) << 11)
+#define SCR_RW_BIT             (U(1) << 10)
+#define SCR_SIF_BIT            (U(1) << 9)
+#define SCR_HCE_BIT            (U(1) << 8)
+#define SCR_SMD_BIT            (U(1) << 7)
+#define SCR_EA_BIT             (U(1) << 3)
+#define SCR_FIQ_BIT            (U(1) << 2)
+#define SCR_IRQ_BIT            (U(1) << 1)
+#define SCR_NS_BIT             (U(1) << 0)
+#define SCR_VALID_BIT_MASK     U(0x2f8f)
+#define SCR_RESET_VAL          SCR_RES1_BITS
+
+/* MDCR_EL3 definitions */
+#define MDCR_SPD32(x)          ((x) << 14)
+#define MDCR_SPD32_LEGACY      U(0x0)
+#define MDCR_SPD32_DISABLE     U(0x2)
+#define MDCR_SPD32_ENABLE      U(0x3)
+#define MDCR_SDD_BIT           (U(1) << 16)
+#define MDCR_NSPB(x)           ((x) << 12)
+#define MDCR_NSPB_EL1          U(0x3)
+#define MDCR_TDOSA_BIT         (U(1) << 10)
+#define MDCR_TDA_BIT           (U(1) << 9)
+#define MDCR_TPM_BIT           (U(1) << 6)
+#define MDCR_EL3_RESET_VAL     U(0x0)
+
+/* MDCR_EL2 definitions */
+#define MDCR_EL2_TPMS          (U(1) << 14)
+#define MDCR_EL2_E2PB(x)       ((x) << 12)
+#define MDCR_EL2_E2PB_EL1      U(0x3)
+#define MDCR_EL2_TDRA_BIT      (U(1) << 11)
+#define MDCR_EL2_TDOSA_BIT     (U(1) << 10)
+#define MDCR_EL2_TDA_BIT       (U(1) << 9)
+#define MDCR_EL2_TDE_BIT       (U(1) << 8)
+#define MDCR_EL2_HPME_BIT      (U(1) << 7)
+#define MDCR_EL2_TPM_BIT       (U(1) << 6)
+#define MDCR_EL2_TPMCR_BIT     (U(1) << 5)
+#define MDCR_EL2_RESET_VAL     U(0x0)
+
+/* HSTR_EL2 definitions */
+#define HSTR_EL2_RESET_VAL     U(0x0)
+#define HSTR_EL2_T_MASK                U(0xff)
+
+/* CNTHP_CTL_EL2 definitions */
+#define CNTHP_CTL_ENABLE_BIT   (U(1) << 0)
+#define CNTHP_CTL_RESET_VAL    U(0x0)
+
+/* VTTBR_EL2 definitions */
+#define VTTBR_RESET_VAL                ULL(0x0)
+#define VTTBR_VMID_MASK                ULL(0xff)
+#define VTTBR_VMID_SHIFT       U(48)
+#define VTTBR_BADDR_MASK       ULL(0xffffffffffff)
+#define VTTBR_BADDR_SHIFT      U(0)
+
+/* HCR definitions */
+#define HCR_API_BIT            (ULL(1) << 41)
+#define HCR_APK_BIT            (ULL(1) << 40)
+#define HCR_TGE_BIT            (ULL(1) << 27)
+#define HCR_RW_SHIFT           U(31)
+#define HCR_RW_BIT             (ULL(1) << HCR_RW_SHIFT)
+#define HCR_AMO_BIT            (ULL(1) << 5)
+#define HCR_IMO_BIT            (ULL(1) << 4)
+#define HCR_FMO_BIT            (ULL(1) << 3)
+
+/* ISR definitions */
+#define ISR_A_SHIFT            U(8)
+#define ISR_I_SHIFT            U(7)
+#define ISR_F_SHIFT            U(6)
+
+/* CNTHCTL_EL2 definitions */
+#define CNTHCTL_RESET_VAL      U(0x0)
+#define EVNTEN_BIT             (U(1) << 2)
+#define EL1PCEN_BIT            (U(1) << 1)
+#define EL1PCTEN_BIT           (U(1) << 0)
+
+/* CNTKCTL_EL1 definitions */
+#define EL0PTEN_BIT            (U(1) << 9)
+#define EL0VTEN_BIT            (U(1) << 8)
+#define EL0PCTEN_BIT           (U(1) << 0)
+#define EL0VCTEN_BIT           (U(1) << 1)
+#define EVNTEN_BIT             (U(1) << 2)
+#define EVNTDIR_BIT            (U(1) << 3)
+#define EVNTI_SHIFT            U(4)
+#define EVNTI_MASK             U(0xf)
+
+/* CPTR_EL3 definitions */
+#define TCPAC_BIT              (U(1) << 31)
+#define TAM_BIT                        (U(1) << 30)
+#define TTA_BIT                        (U(1) << 20)
+#define TFP_BIT                        (U(1) << 10)
+#define CPTR_EZ_BIT            (U(1) << 8)
+#define CPTR_EL3_RESET_VAL     U(0x0)
+
+/* CPTR_EL2 definitions */
+#define CPTR_EL2_RES1          ((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
+#define CPTR_EL2_TCPAC_BIT     (U(1) << 31)
+#define CPTR_EL2_TAM_BIT       (U(1) << 30)
+#define CPTR_EL2_TTA_BIT       (U(1) << 20)
+#define CPTR_EL2_TFP_BIT       (U(1) << 10)
+#define CPTR_EL2_TZ_BIT                (U(1) << 8)
+#define CPTR_EL2_RESET_VAL     CPTR_EL2_RES1
+
+/* CPSR/SPSR definitions */
+#define DAIF_FIQ_BIT           (U(1) << 0)
+#define DAIF_IRQ_BIT           (U(1) << 1)
+#define DAIF_ABT_BIT           (U(1) << 2)
+#define DAIF_DBG_BIT           (U(1) << 3)
+#define SPSR_DAIF_SHIFT                U(6)
+#define SPSR_DAIF_MASK         U(0xf)
+
+#define SPSR_AIF_SHIFT         U(6)
+#define SPSR_AIF_MASK          U(0x7)
+
+#define SPSR_E_SHIFT           U(9)
+#define SPSR_E_MASK            U(0x1)
+#define SPSR_E_LITTLE          U(0x0)
+#define SPSR_E_BIG             U(0x1)
+
+#define SPSR_T_SHIFT           U(5)
+#define SPSR_T_MASK            U(0x1)
+#define SPSR_T_ARM             U(0x0)
+#define SPSR_T_THUMB           U(0x1)
+
+#define SPSR_M_SHIFT           U(4)
+#define SPSR_M_MASK            U(0x1)
+#define SPSR_M_AARCH64         U(0x0)
+#define SPSR_M_AARCH32         U(0x1)
+
+#define DISABLE_ALL_EXCEPTIONS \
+               (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
+
+#define DISABLE_INTERRUPTS     (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
+
+/*
+ * RMR_EL3 definitions
+ */
+#define RMR_EL3_RR_BIT         (U(1) << 1)
+#define RMR_EL3_AA64_BIT       (U(1) << 0)
+
+/*
+ * HI-VECTOR address for AArch32 state
+ */
+#define HI_VECTOR_BASE         U(0xFFFF0000)
+
+/*
+ * TCR defintions
+ */
+#define TCR_EL3_RES1           ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL2_RES1           ((ULL(1) << 31) | (ULL(1) << 23))
+#define TCR_EL1_IPS_SHIFT      U(32)
+#define TCR_EL2_PS_SHIFT       U(16)
+#define TCR_EL3_PS_SHIFT       U(16)
+
+#define TCR_TxSZ_MIN           ULL(16)
+#define TCR_TxSZ_MAX           ULL(39)
+
+/* (internal) physical address size bits in EL3/EL1 */
+#define TCR_PS_BITS_4GB                ULL(0x0)
+#define TCR_PS_BITS_64GB       ULL(0x1)
+#define TCR_PS_BITS_1TB                ULL(0x2)
+#define TCR_PS_BITS_4TB                ULL(0x3)
+#define TCR_PS_BITS_16TB       ULL(0x4)
+#define TCR_PS_BITS_256TB      ULL(0x5)
+
+#define ADDR_MASK_48_TO_63     ULL(0xFFFF000000000000)
+#define ADDR_MASK_44_TO_47     ULL(0x0000F00000000000)
+#define ADDR_MASK_42_TO_43     ULL(0x00000C0000000000)
+#define ADDR_MASK_40_TO_41     ULL(0x0000030000000000)
+#define ADDR_MASK_36_TO_39     ULL(0x000000F000000000)
+#define ADDR_MASK_32_TO_35     ULL(0x0000000F00000000)
+
+#define TCR_RGN_INNER_NC       (ULL(0x0) << 8)
+#define TCR_RGN_INNER_WBA      (ULL(0x1) << 8)
+#define TCR_RGN_INNER_WT       (ULL(0x2) << 8)
+#define TCR_RGN_INNER_WBNA     (ULL(0x3) << 8)
+
+#define TCR_RGN_OUTER_NC       (ULL(0x0) << 10)
+#define TCR_RGN_OUTER_WBA      (ULL(0x1) << 10)
+#define TCR_RGN_OUTER_WT       (ULL(0x2) << 10)
+#define TCR_RGN_OUTER_WBNA     (ULL(0x3) << 10)
+
+#define TCR_SH_NON_SHAREABLE   (ULL(0x0) << 12)
+#define TCR_SH_OUTER_SHAREABLE (ULL(0x2) << 12)
+#define TCR_SH_INNER_SHAREABLE (ULL(0x3) << 12)
+
+#define TCR_TG0_SHIFT          U(14)
+#define TCR_TG0_MASK           ULL(3)
+#define TCR_TG0_4K             (ULL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K            (ULL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K            (ULL(2) << TCR_TG0_SHIFT)
+
+#define TCR_EPD0_BIT           (ULL(1) << 7)
+#define TCR_EPD1_BIT           (ULL(1) << 23)
+
+#define MODE_SP_SHIFT          U(0x0)
+#define MODE_SP_MASK           U(0x1)
+#define MODE_SP_EL0            U(0x0)
+#define MODE_SP_ELX            U(0x1)
+
+#define MODE_RW_SHIFT          U(0x4)
+#define MODE_RW_MASK           U(0x1)
+#define MODE_RW_64             U(0x0)
+#define MODE_RW_32             U(0x1)
+
+#define MODE_EL_SHIFT          U(0x2)
+#define MODE_EL_MASK           U(0x3)
+#define MODE_EL3               U(0x3)
+#define MODE_EL2               U(0x2)
+#define MODE_EL1               U(0x1)
+#define MODE_EL0               U(0x0)
+
+#define MODE32_SHIFT           U(0)
+#define MODE32_MASK            U(0xf)
+#define MODE32_usr             U(0x0)
+#define MODE32_fiq             U(0x1)
+#define MODE32_irq             U(0x2)
+#define MODE32_svc             U(0x3)
+#define MODE32_mon             U(0x6)
+#define MODE32_abt             U(0x7)
+#define MODE32_hyp             U(0xa)
+#define MODE32_und             U(0xb)
+#define MODE32_sys             U(0xf)
+
+#define GET_RW(mode)           (((mode) >> MODE_RW_SHIFT) & MODE_RW_MASK)
+#define GET_EL(mode)           (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
+#define GET_SP(mode)           (((mode) >> MODE_SP_SHIFT) & MODE_SP_MASK)
+#define GET_M32(mode)          (((mode) >> MODE32_SHIFT) & MODE32_MASK)
+
+#define SPSR_64(el, sp, daif)                          \
+       ((MODE_RW_64 << MODE_RW_SHIFT) |                \
+       (((el) & MODE_EL_MASK) << MODE_EL_SHIFT) |      \
+       (((sp) & MODE_SP_MASK) << MODE_SP_SHIFT) |      \
+       (((daif) & SPSR_DAIF_MASK) << SPSR_DAIF_SHIFT))
+
+#define SPSR_MODE32(mode, isa, endian, aif)            \
+       ((MODE_RW_32 << MODE_RW_SHIFT) |                \
+       (((mode) & MODE32_MASK) << MODE32_SHIFT) |      \
+       (((isa) & SPSR_T_MASK) << SPSR_T_SHIFT) |       \
+       (((endian) & SPSR_E_MASK) << SPSR_E_SHIFT) |    \
+       (((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT))
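+
+/*
+ * Illustrative example: the SPSR for an image entered in AArch64 EL1,
+ * using SP_EL1, with all interrupts masked, can be composed as
+ * SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS).
+ */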
+
+/*
+ * TTBR Definitions
+ */
+#define TTBR_CNP_BIT           ULL(0x1)
+
+/*
+ * CTR_EL0 definitions
+ */
+#define CTR_CWG_SHIFT          U(24)
+#define CTR_CWG_MASK           U(0xf)
+#define CTR_ERG_SHIFT          U(20)
+#define CTR_ERG_MASK           U(0xf)
+#define CTR_DMINLINE_SHIFT     U(16)
+#define CTR_DMINLINE_MASK      U(0xf)
+#define CTR_L1IP_SHIFT         U(14)
+#define CTR_L1IP_MASK          U(0x3)
+#define CTR_IMINLINE_SHIFT     U(0)
+#define CTR_IMINLINE_MASK      U(0xf)
+
+#define MAX_CACHE_LINE_SIZE    U(0x800) /* 2KB */
+
+/* Physical timer control register bit fields shifts and masks */
+#define CNTP_CTL_ENABLE_SHIFT   U(0)
+#define CNTP_CTL_IMASK_SHIFT    U(1)
+#define CNTP_CTL_ISTATUS_SHIFT  U(2)
+
+#define CNTP_CTL_ENABLE_MASK    U(1)
+#define CNTP_CTL_IMASK_MASK     U(1)
+#define CNTP_CTL_ISTATUS_MASK   U(1)
+
+/* Exception Syndrome register bit definitions */
+#define ESR_EC_SHIFT                   U(26)
+#define ESR_EC_MASK                    U(0x3f)
+#define ESR_EC_LENGTH                  U(6)
+#define EC_UNKNOWN                     U(0x0)
+#define EC_WFE_WFI                     U(0x1)
+#define EC_AARCH32_CP15_MRC_MCR                U(0x3)
+#define EC_AARCH32_CP15_MRRC_MCRR      U(0x4)
+#define EC_AARCH32_CP14_MRC_MCR                U(0x5)
+#define EC_AARCH32_CP14_LDC_STC                U(0x6)
+#define EC_FP_SIMD                     U(0x7)
+#define EC_AARCH32_CP10_MRC            U(0x8)
+#define EC_AARCH32_CP14_MRRC_MCRR      U(0xc)
+#define EC_ILLEGAL                     U(0xe)
+#define EC_AARCH32_SVC                 U(0x11)
+#define EC_AARCH32_HVC                 U(0x12)
+#define EC_AARCH32_SMC                 U(0x13)
+#define EC_AARCH64_SVC                 U(0x15)
+#define EC_AARCH64_HVC                 U(0x16)
+#define EC_AARCH64_SMC                 U(0x17)
+#define EC_AARCH64_SYS                 U(0x18)
+#define EC_IABORT_LOWER_EL             U(0x20)
+#define EC_IABORT_CUR_EL               U(0x21)
+#define EC_PC_ALIGN                    U(0x22)
+#define EC_DABORT_LOWER_EL             U(0x24)
+#define EC_DABORT_CUR_EL               U(0x25)
+#define EC_SP_ALIGN                    U(0x26)
+#define EC_AARCH32_FP                  U(0x28)
+#define EC_AARCH64_FP                  U(0x2c)
+#define EC_SERROR                      U(0x2f)
+
+/*
+ * External Abort bit in Instruction and Data Aborts synchronous exception
+ * syndromes.
+ */
+#define ESR_ISS_EABORT_EA_BIT          U(9)
+
+#define EC_BITS(x)                     (((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
+
+/* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
+#define RMR_RESET_REQUEST_SHIFT        U(0x1)
+#define RMR_WARM_RESET_CPU             (U(1) << RMR_RESET_REQUEST_SHIFT)
+
+/*******************************************************************************
+ * Definitions of register offsets, fields and macros for CPU system
+ * instructions.
+ ******************************************************************************/
+
+#define TLBI_ADDR_SHIFT                U(12)
+#define TLBI_ADDR_MASK         ULL(0x00000FFFFFFFFFFF)
+#define TLBI_ADDR(x)           (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTCTLBase Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+#define CNTCTLBASE_CNTFRQ      U(0x0)
+#define CNTNSAR                        U(0x4)
+#define CNTNSAR_NS_SHIFT(x)    (x)
+
+#define CNTACR_BASE(x)         (U(0x40) + ((x) << 2))
+#define CNTACR_RPCT_SHIFT      U(0x0)
+#define CNTACR_RVCT_SHIFT      U(0x1)
+#define CNTACR_RFRQ_SHIFT      U(0x2)
+#define CNTACR_RVOFF_SHIFT     U(0x3)
+#define CNTACR_RWVT_SHIFT      U(0x4)
+#define CNTACR_RWPT_SHIFT      U(0x5)
+
+/*******************************************************************************
+ * Definitions of register offsets and fields in the CNTBaseN Frame of the
+ * system level implementation of the Generic Timer.
+ ******************************************************************************/
+/* Physical Count register. */
+#define CNTPCT_LO              U(0x0)
+/* Counter Frequency register. */
+#define CNTBASEN_CNTFRQ                U(0x10)
+/* Physical Timer CompareValue register. */
+#define CNTP_CVAL_LO           U(0x20)
+/* Physical Timer Control register. */
+#define CNTP_CTL               U(0x2c)
+
+/* PMCR_EL0 definitions */
+#define PMCR_EL0_RESET_VAL     U(0x0)
+#define PMCR_EL0_N_SHIFT       U(11)
+#define PMCR_EL0_N_MASK                U(0x1f)
+#define PMCR_EL0_N_BITS                (PMCR_EL0_N_MASK << PMCR_EL0_N_SHIFT)
+#define PMCR_EL0_LC_BIT                (U(1) << 6)
+#define PMCR_EL0_DP_BIT                (U(1) << 5)
+#define PMCR_EL0_X_BIT         (U(1) << 4)
+#define PMCR_EL0_D_BIT         (U(1) << 3)
+
+/*******************************************************************************
+ * Definitions for system register interface to SVE
+ ******************************************************************************/
+#define ZCR_EL3                        S3_6_C1_C2_0
+#define ZCR_EL2                        S3_4_C1_C2_0
+
+/* ZCR_EL3 definitions */
+#define ZCR_EL3_LEN_MASK       U(0xf)
+
+/* ZCR_EL2 definitions */
+#define ZCR_EL2_LEN_MASK       U(0xf)
+
+/*******************************************************************************
+ * Definitions of MAIR encodings for device and normal memory
+ ******************************************************************************/
+/*
+ * MAIR encodings for device memory attributes.
+ */
+#define MAIR_DEV_nGnRnE                ULL(0x0)
+#define MAIR_DEV_nGnRE         ULL(0x4)
+#define MAIR_DEV_nGRE          ULL(0x8)
+#define MAIR_DEV_GRE           ULL(0xc)
+
+/*
+ * MAIR encodings for normal memory attributes.
+ *
+ * Cache Policy
+ *  WT:  Write Through
+ *  WB:  Write Back
+ *  NC:  Non-Cacheable
+ *
+ * Transient Hint
+ *  NTR: Non-Transient
+ *  TR:  Transient
+ *
+ * Allocation Policy
+ *  RA:  Read Allocate
+ *  WA:  Write Allocate
+ *  RWA: Read and Write Allocate
+ *  NA:  No Allocation
+ */
+#define MAIR_NORM_WT_TR_WA     ULL(0x1)
+#define MAIR_NORM_WT_TR_RA     ULL(0x2)
+#define MAIR_NORM_WT_TR_RWA    ULL(0x3)
+#define MAIR_NORM_NC           ULL(0x4)
+#define MAIR_NORM_WB_TR_WA     ULL(0x5)
+#define MAIR_NORM_WB_TR_RA     ULL(0x6)
+#define MAIR_NORM_WB_TR_RWA    ULL(0x7)
+#define MAIR_NORM_WT_NTR_NA    ULL(0x8)
+#define MAIR_NORM_WT_NTR_WA    ULL(0x9)
+#define MAIR_NORM_WT_NTR_RA    ULL(0xa)
+#define MAIR_NORM_WT_NTR_RWA   ULL(0xb)
+#define MAIR_NORM_WB_NTR_NA    ULL(0xc)
+#define MAIR_NORM_WB_NTR_WA    ULL(0xd)
+#define MAIR_NORM_WB_NTR_RA    ULL(0xe)
+#define MAIR_NORM_WB_NTR_RWA   ULL(0xf)
+
+#define MAIR_NORM_OUTER_SHIFT  U(4)
+
+#define MAKE_MAIR_NORMAL_MEMORY(inner, outer)  \
+               ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
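+
+/*
+ * Illustrative example: inner and outer Write-Back, Non-Transient,
+ * Read/Write-Allocate normal memory encodes as 0xff:
+ *
+ *	MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)
+ */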
+
+/* PAR_EL1 fields */
+#define PAR_F_SHIFT    U(0)
+#define PAR_F_MASK     ULL(0x1)
+#define PAR_ADDR_SHIFT U(12)
+#define PAR_ADDR_MASK  (BIT(40) - ULL(1)) /* 40-bits-wide page address */
+
+/*******************************************************************************
+ * Definitions for system register interface to SPE
+ ******************************************************************************/
+#define PMBLIMITR_EL1          S3_0_C9_C10_0
+
+/*******************************************************************************
+ * Definitions for system register interface to MPAM
+ ******************************************************************************/
+#define MPAMIDR_EL1            S3_0_C10_C4_4
+#define MPAM2_EL2              S3_4_C10_C5_0
+#define MPAMHCR_EL2            S3_4_C10_C4_0
+#define MPAM3_EL3              S3_6_C10_C5_0
+
+/*******************************************************************************
+ * Definitions for system register interface to AMU for ARMv8.4 onwards
+ ******************************************************************************/
+#define AMCR_EL0               S3_3_C13_C2_0
+#define AMCFGR_EL0             S3_3_C13_C2_1
+#define AMCGCR_EL0             S3_3_C13_C2_2
+#define AMUSERENR_EL0          S3_3_C13_C2_3
+#define AMCNTENCLR0_EL0                S3_3_C13_C2_4
+#define AMCNTENSET0_EL0                S3_3_C13_C2_5
+#define AMCNTENCLR1_EL0                S3_3_C13_C3_0
+#define AMCNTENSET1_EL0                S3_3_C13_C3_1
+
+/* Activity Monitor Group 0 Event Counter Registers */
+#define AMEVCNTR00_EL0         S3_3_C13_C4_0
+#define AMEVCNTR01_EL0         S3_3_C13_C4_1
+#define AMEVCNTR02_EL0         S3_3_C13_C4_2
+#define AMEVCNTR03_EL0         S3_3_C13_C4_3
+
+/* Activity Monitor Group 0 Event Type Registers */
+#define AMEVTYPER00_EL0                S3_3_C13_C6_0
+#define AMEVTYPER01_EL0                S3_3_C13_C6_1
+#define AMEVTYPER02_EL0                S3_3_C13_C6_2
+#define AMEVTYPER03_EL0                S3_3_C13_C6_3
+
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10_EL0         S3_3_C13_C12_0
+#define AMEVCNTR11_EL0         S3_3_C13_C12_1
+#define AMEVCNTR12_EL0         S3_3_C13_C12_2
+#define AMEVCNTR13_EL0         S3_3_C13_C12_3
+#define AMEVCNTR14_EL0         S3_3_C13_C12_4
+#define AMEVCNTR15_EL0         S3_3_C13_C12_5
+#define AMEVCNTR16_EL0         S3_3_C13_C12_6
+#define AMEVCNTR17_EL0         S3_3_C13_C12_7
+#define AMEVCNTR18_EL0         S3_3_C13_C13_0
+#define AMEVCNTR19_EL0         S3_3_C13_C13_1
+#define AMEVCNTR1A_EL0         S3_3_C13_C13_2
+#define AMEVCNTR1B_EL0         S3_3_C13_C13_3
+#define AMEVCNTR1C_EL0         S3_3_C13_C13_4
+#define AMEVCNTR1D_EL0         S3_3_C13_C13_5
+#define AMEVCNTR1E_EL0         S3_3_C13_C13_6
+#define AMEVCNTR1F_EL0         S3_3_C13_C13_7
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10_EL0                S3_3_C13_C14_0
+#define AMEVTYPER11_EL0                S3_3_C13_C14_1
+#define AMEVTYPER12_EL0                S3_3_C13_C14_2
+#define AMEVTYPER13_EL0                S3_3_C13_C14_3
+#define AMEVTYPER14_EL0                S3_3_C13_C14_4
+#define AMEVTYPER15_EL0                S3_3_C13_C14_5
+#define AMEVTYPER16_EL0                S3_3_C13_C14_6
+#define AMEVTYPER17_EL0                S3_3_C13_C14_7
+#define AMEVTYPER18_EL0                S3_3_C13_C15_0
+#define AMEVTYPER19_EL0                S3_3_C13_C15_1
+#define AMEVTYPER1A_EL0                S3_3_C13_C15_2
+#define AMEVTYPER1B_EL0                S3_3_C13_C15_3
+#define AMEVTYPER1C_EL0                S3_3_C13_C15_4
+#define AMEVTYPER1D_EL0                S3_3_C13_C15_5
+#define AMEVTYPER1E_EL0                S3_3_C13_C15_6
+#define AMEVTYPER1F_EL0                S3_3_C13_C15_7
+
+/* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG1NC_SHIFT U(8)
+#define AMCGCR_EL0_CG1NC_LENGTH        U(8)
+#define AMCGCR_EL0_CG1NC_MASK  U(0xff)
+
+/* MPAM register definitions */
+#define MPAM3_EL3_MPAMEN_BIT           (ULL(1) << 63)
+
+#define MPAMIDR_HAS_HCR_BIT            (ULL(1) << 17)
+
+/*******************************************************************************
+ * RAS system registers
+ ******************************************************************************/
+#define DISR_EL1               S3_0_C12_C1_1
+#define DISR_A_BIT             U(31)
+
+#define ERRIDR_EL1             S3_0_C5_C3_0
+#define ERRIDR_MASK            U(0xffff)
+
+#define ERRSELR_EL1            S3_0_C5_C3_1
+
+/* System register access to Standard Error Record registers */
+#define ERXFR_EL1              S3_0_C5_C4_0
+#define ERXCTLR_EL1            S3_0_C5_C4_1
+#define ERXSTATUS_EL1          S3_0_C5_C4_2
+#define ERXADDR_EL1            S3_0_C5_C4_3
+#define ERXPFGF_EL1            S3_0_C5_C4_4
+#define ERXPFGCTL_EL1          S3_0_C5_C4_5
+#define ERXPFGCDN_EL1          S3_0_C5_C4_6
+#define ERXMISC0_EL1           S3_0_C5_C5_0
+#define ERXMISC1_EL1           S3_0_C5_C5_1
+
+#define ERXCTLR_ED_BIT         (U(1) << 0)
+#define ERXCTLR_UE_BIT         (U(1) << 4)
+
+#define ERXPFGCTL_UC_BIT       (U(1) << 1)
+#define ERXPFGCTL_UEU_BIT      (U(1) << 2)
+#define ERXPFGCTL_CDEN_BIT     (U(1) << 31)
+
+/*******************************************************************************
+ * Armv8.3 Pointer Authentication Registers
+ ******************************************************************************/
+#define APGAKeyLo_EL1          S3_0_C2_C3_0
+
+/*******************************************************************************
+ * Armv8.4 Data Independent Timing Registers
+ ******************************************************************************/
+#define DIT                    S3_3_C4_C2_5
+#define DIT_BIT                        BIT(24)
+
+#endif /* ARCH_H */
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
new file mode 100644 (file)
index 0000000..7222b9d
--- /dev/null
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ARCH_HELPERS_H
+#define ARCH_HELPERS_H
+
+#include <arch.h>
+#include <cdefs.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)             \
+static inline u_register_t read_ ## _name(void)                        \
+{                                                              \
+       u_register_t v;                                         \
+       __asm__ volatile ("mrs %0, " #_reg_name : "=r" (v));    \
+       return v;                                               \
+}
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)                    \
+static inline void write_ ## _name(u_register_t v)                     \
+{                                                                      \
+       __asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v));        \
+}
+
+#define SYSREG_WRITE_CONST(reg_name, v)                                \
+       __asm__ volatile ("msr " #reg_name ", %0" : : "i" (v))
+
+/* Define read function for system register */
+#define DEFINE_SYSREG_READ_FUNC(_name)                         \
+       _DEFINE_SYSREG_READ_FUNC(_name, _name)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name)                  \
+       _DEFINE_SYSREG_READ_FUNC(_name, _name)          \
+       _DEFINE_SYSREG_WRITE_FUNC(_name, _name)
+
+/* Define read & write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_RW_FUNCS(_name, _reg_name)        \
+       _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)      \
+       _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
+
+/* Define read function for renamed system register */
+#define DEFINE_RENAME_SYSREG_READ_FUNC(_name, _reg_name)       \
+       _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)
+
+/* Define write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_WRITE_FUNC(_name, _reg_name)      \
+       _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
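+
+/*
+ * As a concrete sketch of how these expand, DEFINE_SYSREG_RW_FUNCS(sctlr_el1)
+ * generates the pair of accessors below, which C code can then call directly:
+ *
+ *   static inline u_register_t read_sctlr_el1(void)
+ *   {
+ *           u_register_t v;
+ *           __asm__ volatile ("mrs %0, sctlr_el1" : "=r" (v));
+ *           return v;
+ *   }
+ *
+ *   static inline void write_sctlr_el1(u_register_t v)
+ *   {
+ *           __asm__ volatile ("msr sctlr_el1, %0" : : "r" (v));
+ *   }
+ */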
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+
+/* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op)                         \
+static inline void _op(void)                           \
+{                                                      \
+       __asm__ (#_op);                                 \
+}
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type)             \
+static inline void _op ## _type(void)                  \
+{                                                      \
+       __asm__ (#_op " " #_type);                      \
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type)       \
+static inline void _op ## _type(uint64_t v)            \
+{                                                      \
+        __asm__ (#_op " " #_type ", %0" : : "r" (v));  \
+}
+
+/*******************************************************************************
+ * TLB maintenance accessor prototypes
+ ******************************************************************************/
+
+#if ERRATA_A57_813419
+/*
+ * Define function for TLBI instruction with type specifier that implements
+ * the workaround for erratum 813419 of Cortex-A57.
+ */
+#define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(_type)\
+static inline void tlbi ## _type(void)                 \
+{                                                      \
+       __asm__("tlbi " #_type "\n"                     \
+               "dsb ish\n"                             \
+               "tlbi " #_type);                        \
+}
+
+/*
+ * Define function for TLBI instruction with register parameter that implements
+ * the workaround for erratum 813419 of Cortex-A57.
+ */
+#define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(_type) \
+static inline void tlbi ## _type(uint64_t v)                   \
+{                                                              \
+       __asm__("tlbi " #_type ", %0\n"                         \
+               "dsb ish\n"                                     \
+               "tlbi " #_type ", %0" : : "r" (v));             \
+}
+#endif /* ERRATA_A57_813419 */
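+
+/*
+ * For reference, DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3) expands to
+ * the equivalent of the sketch below; the DSB and the repeated TLBI are what
+ * implement the erratum 813419 workaround.
+ *
+ *   static inline void tlbialle3(void)
+ *   {
+ *           __asm__("tlbi alle3\n"
+ *                   "dsb ish\n"
+ *                   "tlbi alle3");
+ *   }
+ */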
+
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2is)
+#if ERRATA_A57_813419
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3)
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3is)
+#else
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3is)
+#endif
+DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
+
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaale1is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale2is)
+#if ERRATA_A57_813419
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vae3is)
+DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vale3is)
+#else
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae3is)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale3is)
+#endif
+
+/*******************************************************************************
+ * Cache maintenance accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, isw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cisw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, csw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, ivac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, civac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvau)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, zva)
+
+/*******************************************************************************
+ * Address translation accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1w)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0w)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e1r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e2r)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e3r)
+
+void flush_dcache_range(uintptr_t addr, size_t size);
+void clean_dcache_range(uintptr_t addr, size_t size);
+void inv_dcache_range(uintptr_t addr, size_t size);
+
+void dcsw_op_louis(u_register_t op_type);
+void dcsw_op_all(u_register_t op_type);
+
+void disable_mmu_el1(void);
+void disable_mmu_el3(void);
+void disable_mmu_icache_el1(void);
+void disable_mmu_icache_el3(void);
+
+/*******************************************************************************
+ * Misc. accessor prototypes
+ ******************************************************************************/
+
+#define write_daifclr(val) SYSREG_WRITE_CONST(daifclr, val)
+#define write_daifset(val) SYSREG_WRITE_CONST(daifset, val)
+
+DEFINE_SYSREG_RW_FUNCS(par_el1)
+DEFINE_SYSREG_READ_FUNC(id_pfr1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64isar1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64pfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64dfr0_el1)
+DEFINE_SYSREG_READ_FUNC(CurrentEl)
+DEFINE_SYSREG_READ_FUNC(ctr_el0)
+DEFINE_SYSREG_RW_FUNCS(daif)
+DEFINE_SYSREG_RW_FUNCS(spsr_el1)
+DEFINE_SYSREG_RW_FUNCS(spsr_el2)
+DEFINE_SYSREG_RW_FUNCS(spsr_el3)
+DEFINE_SYSREG_RW_FUNCS(elr_el1)
+DEFINE_SYSREG_RW_FUNCS(elr_el2)
+DEFINE_SYSREG_RW_FUNCS(elr_el3)
+
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
+DEFINE_SYSOP_TYPE_FUNC(dsb, nsh)
+DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, oshld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, oshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, osh)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nshld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nshst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, nsh)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishld)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
+DEFINE_SYSOP_FUNC(isb)
+
+static inline void enable_irq(void)
+{
+       /*
+        * The compiler memory barrier prevents the compiler from scheduling
+        * non-volatile memory accesses after the write to the register.
+        *
+        * This could happen if some initialization code issues non-volatile
+        * accesses to an area used by an interrupt handler, on the assumption
+        * that this is safe because interrupts are disabled at that point
+        * (according to program order). However, non-volatile accesses are not
+        * necessarily ordered in program order relative to volatile inline
+        * assembly statements (and volatile accesses).
+        */
+       COMPILER_BARRIER();
+       write_daifclr(DAIF_IRQ_BIT);
+       isb();
+}
+
+static inline void enable_fiq(void)
+{
+       COMPILER_BARRIER();
+       write_daifclr(DAIF_FIQ_BIT);
+       isb();
+}
+
+static inline void enable_serror(void)
+{
+       COMPILER_BARRIER();
+       write_daifclr(DAIF_ABT_BIT);
+       isb();
+}
+
+static inline void enable_debug_exceptions(void)
+{
+       COMPILER_BARRIER();
+       write_daifclr(DAIF_DBG_BIT);
+       isb();
+}
+
+static inline void disable_irq(void)
+{
+       COMPILER_BARRIER();
+       write_daifset(DAIF_IRQ_BIT);
+       isb();
+}
+
+static inline void disable_fiq(void)
+{
+       COMPILER_BARRIER();
+       write_daifset(DAIF_FIQ_BIT);
+       isb();
+}
+
+static inline void disable_serror(void)
+{
+       COMPILER_BARRIER();
+       write_daifset(DAIF_ABT_BIT);
+       isb();
+}
+
+static inline void disable_debug_exceptions(void)
+{
+       COMPILER_BARRIER();
+       write_daifset(DAIF_DBG_BIT);
+       isb();
+}
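+
+/*
+ * Typical usage sketch (illustrative only): bracket a short critical section
+ * with the helpers above so that an IRQ cannot preempt the update at this EL.
+ * 'shared_counter' is a hypothetical variable.
+ *
+ *   disable_irq();
+ *   shared_counter++;
+ *   enable_irq();
+ */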
+
+#if !ERROR_DEPRECATED
+uint32_t get_afflvl_shift(uint32_t);
+uint32_t mpidr_mask_lower_afflvls(uint64_t, uint32_t);
+
+void __dead2 eret(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
+                 uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
+#endif
+void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
+                uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
+
+/*******************************************************************************
+ * System register accessor prototypes
+ ******************************************************************************/
+DEFINE_SYSREG_READ_FUNC(midr_el1)
+DEFINE_SYSREG_READ_FUNC(mpidr_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64mmfr0_el1)
+
+DEFINE_SYSREG_RW_FUNCS(scr_el3)
+DEFINE_SYSREG_RW_FUNCS(hcr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vbar_el1)
+DEFINE_SYSREG_RW_FUNCS(vbar_el2)
+DEFINE_SYSREG_RW_FUNCS(vbar_el3)
+
+DEFINE_SYSREG_RW_FUNCS(sctlr_el1)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el2)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(actlr_el1)
+DEFINE_SYSREG_RW_FUNCS(actlr_el2)
+DEFINE_SYSREG_RW_FUNCS(actlr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(esr_el1)
+DEFINE_SYSREG_RW_FUNCS(esr_el2)
+DEFINE_SYSREG_RW_FUNCS(esr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(afsr0_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el2)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el3)
+
+DEFINE_SYSREG_RW_FUNCS(afsr1_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el2)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el3)
+
+DEFINE_SYSREG_RW_FUNCS(far_el1)
+DEFINE_SYSREG_RW_FUNCS(far_el2)
+DEFINE_SYSREG_RW_FUNCS(far_el3)
+
+DEFINE_SYSREG_RW_FUNCS(mair_el1)
+DEFINE_SYSREG_RW_FUNCS(mair_el2)
+DEFINE_SYSREG_RW_FUNCS(mair_el3)
+
+DEFINE_SYSREG_RW_FUNCS(amair_el1)
+DEFINE_SYSREG_RW_FUNCS(amair_el2)
+DEFINE_SYSREG_RW_FUNCS(amair_el3)
+
+DEFINE_SYSREG_READ_FUNC(rvbar_el1)
+DEFINE_SYSREG_READ_FUNC(rvbar_el2)
+DEFINE_SYSREG_READ_FUNC(rvbar_el3)
+
+DEFINE_SYSREG_RW_FUNCS(rmr_el1)
+DEFINE_SYSREG_RW_FUNCS(rmr_el2)
+DEFINE_SYSREG_RW_FUNCS(rmr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(tcr_el1)
+DEFINE_SYSREG_RW_FUNCS(tcr_el2)
+DEFINE_SYSREG_RW_FUNCS(tcr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el1)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el2)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el3)
+
+DEFINE_SYSREG_RW_FUNCS(ttbr1_el1)
+
+DEFINE_SYSREG_RW_FUNCS(vttbr_el2)
+
+DEFINE_SYSREG_RW_FUNCS(cptr_el2)
+DEFINE_SYSREG_RW_FUNCS(cptr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(cpacr_el1)
+DEFINE_SYSREG_RW_FUNCS(cntfrq_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthp_ctl_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_tval_el2)
+DEFINE_SYSREG_RW_FUNCS(cnthp_cval_el2)
+DEFINE_SYSREG_RW_FUNCS(cntps_ctl_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_tval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_cval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntp_ctl_el0)
+DEFINE_SYSREG_RW_FUNCS(cntp_tval_el0)
+DEFINE_SYSREG_RW_FUNCS(cntp_cval_el0)
+DEFINE_SYSREG_READ_FUNC(cntpct_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
+
+#define get_cntp_ctl_enable(x)  (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
+                                       CNTP_CTL_ENABLE_MASK)
+#define get_cntp_ctl_imask(x)   (((x) >> CNTP_CTL_IMASK_SHIFT) & \
+                                       CNTP_CTL_IMASK_MASK)
+#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
+                                       CNTP_CTL_ISTATUS_MASK)
+
+#define set_cntp_ctl_enable(x)  ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define set_cntp_ctl_imask(x)   ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
+
+#define clr_cntp_ctl_enable(x)  ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
+#define clr_cntp_ctl_imask(x)   ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
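+
+/*
+ * Usage sketch (illustrative only): program the EL1 secure physical timer to
+ * fire in roughly one second, using the accessors and helpers above. This
+ * assumes CNTFRQ_EL0 holds the system counter frequency in Hz.
+ *
+ *   u_register_t ctl = 0;
+ *
+ *   write_cntps_cval_el1(read_cntpct_el0() + read_cntfrq_el0());
+ *   set_cntp_ctl_enable(ctl);
+ *   write_cntps_ctl_el1(ctl);
+ */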
+
+DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
+
+DEFINE_SYSREG_RW_FUNCS(cntvoff_el2)
+
+DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
+DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
+
+DEFINE_SYSREG_READ_FUNC(isr_el1)
+
+DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
+DEFINE_SYSREG_RW_FUNCS(mdcr_el3)
+DEFINE_SYSREG_RW_FUNCS(hstr_el2)
+DEFINE_SYSREG_RW_FUNCS(pmcr_el0)
+
+/* GICv3 System Registers */
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el3, ICC_SRE_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_pmr_el1, ICC_PMR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_rpr_el1, ICC_RPR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el3, ICC_IGRPEN1_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el1, ICC_IGRPEN1_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir0_el1, ICC_HPPIR0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir1_el1, ICC_HPPIR1_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar0_el1, ICC_IAR0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar1_el1, ICC_IAR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir0_el1, ICC_EOIR0_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset1_el0, AMCNTENSET1_EL0)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(mpamidr_el1, MPAMIDR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpam3_el3, MPAM3_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpam2_el2, MPAM2_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(mpamhcr_el2, MPAMHCR_EL2)
+
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
+
+DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
+
+DEFINE_RENAME_SYSREG_READ_FUNC(erxfr_el1, ERXFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxctlr_el1, ERXCTLR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(erxstatus_el1, ERXSTATUS_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
+
+/* Armv8.3 Pointer Authentication Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(apgakeylo_el1, APGAKeyLo_EL1)
+
+#define IS_IN_EL(x) \
+       (GET_EL(read_CurrentEl()) == MODE_EL##x)
+
+#define IS_IN_EL1() IS_IN_EL(1)
+#define IS_IN_EL2() IS_IN_EL(2)
+#define IS_IN_EL3() IS_IN_EL(3)
+
+static inline unsigned int get_current_el(void)
+{
+       return GET_EL(read_CurrentEl());
+}
+
+/*
+ * Check whether an EL is implemented, using the ID_AA64PFR0_EL1 fields.
+ */
+static inline uint64_t el_implemented(unsigned int el)
+{
+       if (el > 3U) {
+               return EL_IMPL_NONE;
+       } else {
+               unsigned int shift = ID_AA64PFR0_EL1_SHIFT * el;
+
+               return (read_id_aa64pfr0_el1() >> shift) & ID_AA64PFR0_ELX_MASK;
+       }
+}
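+
+/*
+ * Usage sketch (illustrative only): check whether EL2 is implemented before
+ * deciding which EL to enter next. The non-zero value also distinguishes
+ * AArch64-only EL2 from EL2 supporting both execution states.
+ *
+ *   if (el_implemented(2U) != EL_IMPL_NONE) {
+ *           (EL2 is present on this CPU)
+ *   }
+ */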
+
+#if !ERROR_DEPRECATED
+#define EL_IMPLEMENTED(_el)    el_implemented(_el)
+#endif
+
+/* Previously defined accessor functions with incomplete register names */
+
+#define read_current_el()      read_CurrentEl()
+
+#define dsb()                  dsbsy()
+
+#define read_midr()            read_midr_el1()
+
+#define read_mpidr()           read_mpidr_el1()
+
+#define read_scr()             read_scr_el3()
+#define write_scr(_v)          write_scr_el3(_v)
+
+#define read_hcr()             read_hcr_el2()
+#define write_hcr(_v)          write_hcr_el2(_v)
+
+#define read_cpacr()           read_cpacr_el1()
+#define write_cpacr(_v)                write_cpacr_el1(_v)
+
+#endif /* ARCH_HELPERS_H */
diff --git a/include/arch/aarch64/asm_macros.S b/include/arch/aarch64/asm_macros.S
new file mode 100644 (file)
index 0000000..dea3021
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASM_MACROS_S
+#define ASM_MACROS_S
+
+#include <arch.h>
+#include <asm_macros_common.S>
+#include <spinlock.h>
+
+/*
+ * TLBI instruction with type specifier that implements the workaround for
+ * erratum 813419 of Cortex-A57.
+ */
+#if ERRATA_A57_813419
+#define TLB_INVALIDATE(_type) \
+       tlbi    _type; \
+       dsb     ish; \
+       tlbi    _type
+#else
+#define TLB_INVALIDATE(_type) \
+       tlbi    _type
+#endif
+
+
+       .macro  func_prologue
+       stp     x29, x30, [sp, #-0x10]!
+       mov     x29, sp
+       .endm
+
+       .macro  func_epilogue
+       ldp     x29, x30, [sp], #0x10
+       .endm
+
+
+       .macro  dcache_line_size  reg, tmp
+       mrs     \tmp, ctr_el0
+       ubfx    \tmp, \tmp, #16, #4
+       mov     \reg, #4
+       lsl     \reg, \reg, \tmp
+       .endm
+
+
+       .macro  icache_line_size  reg, tmp
+       mrs     \tmp, ctr_el0
+       and     \tmp, \tmp, #0xf
+       mov     \reg, #4
+       lsl     \reg, \reg, \tmp
+       .endm
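+
+       /*
+        * Illustrative sketch (not part of the original file): a minimal
+        * clean-by-VA loop built on dcache_line_size. It assumes x0 holds the
+        * (cache-line-aligned) start address and x1 the end address, and it
+        * clobbers x2 and x3.
+        *
+        *      dcache_line_size x2, x3
+        * 1:
+        *      dc      cvac, x0
+        *      add     x0, x0, x2
+        *      cmp     x0, x1
+        *      b.lo    1b
+        *      dsb     sy
+        */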
+
+
+       .macro  smc_check  label
+       mrs     x0, esr_el3
+       ubfx    x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+       cmp     x0, #EC_AARCH64_SMC
+       b.ne    $label
+       .endm
+
+       /*
+        * Declare the exception vector table, enforcing that it is aligned to
+        * a 2KB boundary, as required by the ARMv8 architecture.
+        * Use zero bytes as the fill value for the padding bytes so that the
+        * padding decodes as illegal AArch64 instructions. This increases
+        * security and robustness, and potentially facilitates debugging.
+        */
+       .macro vector_base  label, section_name=.vectors
+       .section \section_name, "ax"
+       .align 11, 0
+       \label:
+       .endm
+
+       /*
+        * Create an entry in the exception vector table, enforcing that it is
+        * aligned to a 128-byte boundary, as required by the ARMv8
+        * architecture. Use zero bytes as the fill value for the padding bytes
+        * so that the padding decodes as illegal AArch64 instructions. This
+        * increases security and robustness, and potentially facilitates
+        * debugging.
+        */
+       .macro vector_entry  label, section_name=.vectors
+       .cfi_sections .debug_frame
+       .section \section_name, "ax"
+       .align 7, 0
+       .type \label, %function
+       .cfi_startproc
+       \label:
+       .endm
+
+       /*
+        * Add padding bytes to fill the exception vector entry, whose size is
+        * always 32 instructions. If there are more than 32 instructions in
+        * the exception vector entry then an error is emitted.
+        */
+       .macro end_vector_entry label
+       .cfi_endproc
+       .fill   \label + (32 * 4) - .
+       .endm
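+
+       /*
+        * Usage sketch (illustrative only): declaring a vector table with the
+        * macros above. 'my_vectors' and 'sync_sp_el0' are hypothetical names.
+        *
+        *      vector_base my_vectors
+        *      vector_entry sync_sp_el0
+        *              b       .       (handler body, up to 32 instructions)
+        *      end_vector_entry sync_sp_el0
+        */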
+
+       /*
+        * This macro calculates the base address of the current CPU's MP stack
+        * using the plat_my_core_pos() index, the name of the stack storage
+        * and the size of each stack.
+        * Out: X0 = physical address of stack base
+        * Clobber: X30, X1, X2
+        */
+       .macro get_my_mp_stack _name, _size
+       bl      plat_my_core_pos
+       adrp    x2, (\_name + \_size)
+       add     x2, x2, :lo12:(\_name + \_size)
+       mov     x1, #\_size
+       madd    x0, x0, x1, x2
+       .endm
+
+       /*
+        * This macro calculates the base address of a UP stack using the
+        * name of the stack storage and the size of the stack.
+        * Out: X0 = physical address of stack base
+        */
+       .macro get_up_stack _name, _size
+       adrp    x0, (\_name + \_size)
+       add     x0, x0, :lo12:(\_name + \_size)
+       .endm
+
+       /*
+        * Helper macro to generate the best mov/movk combinations according
+        * to the value to be moved. The 16 bits starting at bit '_shift' are
+        * tested and, if not zero, moved into '_reg' without affecting other
+        * bits.
+        */
+       .macro _mov_imm16 _reg, _val, _shift
+               .if (\_val >> \_shift) & 0xffff
+                       .if (\_val & (1 << \_shift - 1))
+                               movk    \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
+                       .else
+                               mov     \_reg, \_val & (0xffff << \_shift)
+                       .endif
+               .endif
+       .endm
+
+       /*
+        * Helper macro to load arbitrary values into 32-bit or 64-bit
+        * registers, generating the best mov/movk combinations. Many base
+        * addresses are 64KB aligned, in which case the macro skips updating
+        * bits 15:0.
+        */
+       .macro mov_imm _reg, _val
+               .if (\_val) == 0
+                       mov     \_reg, #0
+               .else
+                       _mov_imm16      \_reg, (\_val), 0
+                       _mov_imm16      \_reg, (\_val), 16
+                       _mov_imm16      \_reg, (\_val), 32
+                       _mov_imm16      \_reg, (\_val), 48
+               .endif
+       .endm
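+
+       /*
+        * For example, 'mov_imm x0, 0xffff0000' emits the single instruction
+        * 'mov x0, #0xffff0000' (bits 15:0 are zero, so the first _mov_imm16
+        * invocation is skipped), whereas 'mov_imm x0, 0x12345678' emits
+        * 'mov x0, #0x5678' followed by 'movk x0, #0x1234, LSL 16'.
+        */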
+
+       /*
+        * Macro to mark instances where we're jumping to a function and don't
+        * expect a return. To provide the function being jumped to with
+        * additional information, we use 'bl' instruction to jump rather than
+        * 'b'.
+        *
+        * Debuggers infer the location of a call from where LR points to,
+        * which is usually the instruction after 'bl'. If this macro expansion
+        * happens to be the last location in a function, that will cause the
+        * LR to point to a location beyond the function, thereby misleading
+        * the debugger's back trace. We therefore insert a 'nop' after the
+        * function call for debug builds, unless the 'skip_nop' parameter is
+        * non-zero.
+        */
+       .macro no_ret _func:req, skip_nop=0
+       bl      \_func
+#if DEBUG
+       .ifeq \skip_nop
+       nop
+       .endif
+#endif
+       .endm
+
+       /*
+        * Reserve space for a spin lock in assembly file.
+        */
+       .macro define_asm_spinlock _name:req
+       .align  SPINLOCK_ASM_ALIGN
+       \_name:
+       .space  SPINLOCK_ASM_SIZE
+       .endm
+
+#if RAS_EXTENSION
+       .macro esb
+       .inst   0xd503221f
+       .endm
+#endif
+
+#endif /* ASM_MACROS_S */
diff --git a/include/arch/aarch64/assert_macros.S b/include/arch/aarch64/assert_macros.S
new file mode 100644 (file)
index 0000000..06371c4
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef ASSERT_MACROS_S
+#define ASSERT_MACROS_S
+
+       /*
+        * Assembler macro to enable asm_assert. Use this macro wherever
+        * assert is required in assembly. Please note that the macro makes
+        * use of label '300' to provide the logic and the caller
+        * should make sure that this label is not used to branch prior
+        * to calling this macro.
+        */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+       .pushsection .rodata.str1.1, "aS" ;\
+       .L_assert_filename: ;\
+                       .string __FILE__ ;\
+       .popsection ;\
+.endif ;\
+       b._cc   300f ;\
+       adr     x0, .L_assert_filename ;\
+       mov     x1, __LINE__ ;\
+       b       asm_assert ;\
+300:
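+
+       /*
+        * Usage sketch (illustrative only): assert in assembly that x0 is
+        * non-zero. The condition code passed to ASM_ASSERT is the one that
+        * must hold for execution to continue.
+        *
+        *      cmp     x0, #0
+        *      ASM_ASSERT(ne)
+        */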
+
+#endif /* ASSERT_MACROS_S */
diff --git a/include/arch/aarch64/console_macros.S b/include/arch/aarch64/console_macros.S
new file mode 100644 (file)
index 0000000..5c88d4f
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef CONSOLE_MACROS_S
+#define CONSOLE_MACROS_S
+
+#include <console.h>
+
+/*
+ * This macro encapsulates the common setup that has to be done at the end of
+ * a console driver's register function. It will register all of the driver's
+ * callbacks in the console_t structure and initialize the flags field (by
+ * default consoles are enabled for the "boot" and "crash" states; this can be
+ * changed after registration with the console_set_scope() function). It ends
+ * with a tail call to console_register(), which returns directly to the caller.
+ * REQUIRES console_t pointer in x0 and a valid return address in x30.
+ */
+/*
+ * The USE_FINISH_CONSOLE_REG_2 guard is introduced to allow selection between
+ * the 2 variants of the finish_console_register macro and will be removed
+ * once the deprecated variant is removed.
+ */
+#ifndef USE_FINISH_CONSOLE_REG_2
+#if !ERROR_DEPRECATED
+       /* This version of the macro is deprecated. Use the new version */
+       .macro  finish_console_register _driver
+       /*
+        * Add these weak definitions so we will automatically write a 0 if the
+        * function doesn't exist. I'd rather use .ifdef but that only works if
+        * the function was defined (not just declared .global) above this point
+        * in the file, which we can't guarantee.
+        */
+       .weak console_\_driver\()_putc
+       .weak console_\_driver\()_getc
+       .weak console_\_driver\()_flush
+
+       /* Don't use adrp on weak funcs! See GNU ld bugzilla issue 22589. */
+       ldr     x1, =console_\_driver\()_putc
+       str     x1, [x0, #CONSOLE_T_PUTC]
+       ldr     x1, =console_\_driver\()_getc
+       str     x1, [x0, #CONSOLE_T_GETC]
+       ldr     x1, =console_\_driver\()_flush
+       str     x1, [x0, #CONSOLE_T_FLUSH]
+       mov     x1, #(CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH)
+       str     x1, [x0, #CONSOLE_T_FLAGS]
+       b       console_register
+       .endm
+#endif /* ERROR_DEPRECATED */
+#else /* USE_FINISH_CONSOLE_REG_2 */
+       /* The new version of the macro not using weak references */
+       .macro  finish_console_register _driver, putc=0, getc=0, flush=0
+       /*
+        * If any of the callbacks is not specified, or is passed as 0, the
+        * corresponding callback entry in console_t is set to 0.
+        */
+       .ifne \putc
+         adrp  x1, console_\_driver\()_putc
+         add   x1, x1, :lo12:console_\_driver\()_putc
+         str   x1, [x0, #CONSOLE_T_PUTC]
+       .else
+         str   xzr, [x0, #CONSOLE_T_PUTC]
+       .endif
+
+       .ifne \getc
+         adrp  x1, console_\_driver\()_getc
+         add   x1, x1, :lo12:console_\_driver\()_getc
+         str   x1, [x0, #CONSOLE_T_GETC]
+       .else
+         str   xzr, [x0, #CONSOLE_T_GETC]
+       .endif
+
+       .ifne \flush
+         adrp  x1, console_\_driver\()_flush
+         add   x1, x1, :lo12:console_\_driver\()_flush
+         str   x1, [x0, #CONSOLE_T_FLUSH]
+       .else
+         str   xzr, [x0, #CONSOLE_T_FLUSH]
+       .endif
+
+       mov     x1, #(CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH)
+       str     x1, [x0, #CONSOLE_T_FLAGS]
+       b       console_register
+       .endm
+#endif /* USE_FINISH_CONSOLE_REG_2 */
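+
+/*
+ * Usage sketch (illustrative only) of the new variant: the tail of a
+ * hypothetical 'foo' driver's register function, once the console_t pointed
+ * to by x0 has been populated with driver-specific state. This driver
+ * provides putc and flush callbacks but no getc.
+ *
+ *     func console_foo_register
+ *             ... driver-specific setup, console_t pointer in x0 ...
+ *             finish_console_register foo, putc=1, getc=0, flush=1
+ *     endfunc console_foo_register
+ */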
+
+#endif /* CONSOLE_MACROS_S */
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
new file mode 100644 (file)
index 0000000..410aeab
--- /dev/null
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef EL3_COMMON_MACROS_S
+#define EL3_COMMON_MACROS_S
+
+#include <arch.h>
+#include <asm_macros.S>
+
+       /*
+        * Helper macro to initialise EL3 registers we care about.
+        */
+       .macro el3_arch_init_common
+       /* ---------------------------------------------------------------------
+        * SCTLR_EL3 has already been initialised - read current value before
+        * modifying.
+        *
+        * SCTLR_EL3.I: Enable the instruction cache.
+        *
+        * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
+        *  exception is generated if a load or store instruction executed at
+        *  EL3 uses the SP as the base address and the SP is not aligned to a
+        *  16-byte boundary.
+        *
+        * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
+        *  load or store one or more registers have an alignment check that the
+        *  address being accessed is aligned to the size of the data element(s)
+        *  being accessed.
+        * ---------------------------------------------------------------------
+        */
+       mov     x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+       mrs     x0, sctlr_el3
+       orr     x0, x0, x1
+       msr     sctlr_el3, x0
+       isb
+
+#ifdef IMAGE_BL31
+       /* ---------------------------------------------------------------------
+        * Initialise the per-cpu data pointer for this CPU (held in TPIDR_EL3).
+        * This is done early so that crash reporting has access to the crash
+        * stack. Since crash reporting depends on cpu_data to report the
+        * unhandled exception, not doing so can lead to recursive exceptions
+        * due to a NULL TPIDR_EL3.
+        * ---------------------------------------------------------------------
+        */
+       bl      init_cpu_data_ptr
+#endif /* IMAGE_BL31 */
+
+       /* ---------------------------------------------------------------------
+        * Initialise SCR_EL3, setting all fields rather than relying on hw.
+        * All fields are architecturally UNKNOWN on reset. The following fields
+        * do not change during the TF lifetime. The remaining fields are set to
+        * zero here but are updated ahead of transitioning to a lower EL in the
+        * function cm_init_context_common().
+        *
+        * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
+        *  EL2, EL1 and EL0 are not trapped to EL3.
+        *
+        * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
+        *  EL2, EL1 and EL0 are not trapped to EL3.
+        *
+        * SCR_EL3.SIF: Set to one to disable instruction fetches from
+        *  Non-secure memory.
+        *
+        * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
+        *  both Security states and both Execution states.
+        *
+        * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
+        *  to EL3 when executing at any EL.
+        *
+        * SCR_EL3.{API,APK}: For Armv8.3 pointer authentication feature,
+        * disable traps to EL3 when accessing key registers or using pointer
+        * authentication instructions from lower ELs.
+        * ---------------------------------------------------------------------
+        */
+       mov_imm x0, ((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT | \
+                               SCR_API_BIT | SCR_APK_BIT) \
+                       & ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
+       msr     scr_el3, x0
+
+       /* ---------------------------------------------------------------------
+        * Initialise MDCR_EL3, setting all fields rather than relying on hw.
+        * Some fields are architecturally UNKNOWN on reset.
+        *
+        * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
+        *  Debug exceptions, other than Breakpoint Instruction exceptions, are
+        *  disabled from all ELs in Secure state.
+        *
+        * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
+        *  privileged debug from S-EL1.
+        *
+        * MDCR_EL3.TDOSA: Set to zero so that EL1 and EL2 System register
+        *  accesses to the powerdown debug registers do not trap to EL3.
+        *
+        * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
+        *  debug registers, other than those registers that are controlled by
+        *  MDCR_EL3.TDOSA.
+        *
+        * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
+        *  accesses to all Performance Monitors registers do not trap to EL3.
+        * ---------------------------------------------------------------------
+        */
+       mov_imm x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE)) \
+                       & ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT))
+       msr     mdcr_el3, x0
+
+       /* ---------------------------------------------------------------------
+        * Enable External Aborts and SError Interrupts now that the exception
+        * vectors have been set up.
+        * ---------------------------------------------------------------------
+        */
+       msr     daifclr, #DAIF_ABT_BIT
+
+       /* ---------------------------------------------------------------------
+        * Initialise CPTR_EL3, setting all fields rather than relying on hw.
+        * All fields are architecturally UNKNOWN on reset.
+        *
+        * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
+        *  CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
+        *
+        * CPTR_EL3.TTA: Set to zero so that System register accesses to the
+        *  trace registers do not trap to EL3.
+        *
+        * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
+        *  by Advanced SIMD, floating-point or SVE instructions (if implemented)
+        *  do not trap to EL3.
+        */
+       mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
+       msr     cptr_el3, x0
+
+       /*
+        * If Data Independent Timing (DIT) functionality is implemented,
+        * always enable DIT in EL3
+        */
+       mrs     x0, id_aa64pfr0_el1
+       ubfx    x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
+       cmp     x0, #ID_AA64PFR0_DIT_SUPPORTED
+       bne     1f
+       mov     x0, #DIT_BIT
+       msr     DIT, x0
+1:
+       .endm
+
+/* -----------------------------------------------------------------------------
+ * This is the superset of actions that need to be performed during a cold boot
+ * or a warm boot in EL3. This code is shared by BL1 and BL31.
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows certain actions to
+ * be enabled or disabled.
+ *
+ *  _init_sctlr:
+ *     Whether the macro needs to initialise SCTLR_EL3, including configuring
+ *      the endianness of data accesses.
+ *
+ *  _warm_boot_mailbox:
+ *     Whether the macro needs to detect the type of boot (cold/warm). The
+ *     detection is based on the platform entrypoint address: if it is zero
+ *     then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ *     this macro jumps to the platform entrypoint address.
+ *
+ *  _secondary_cold_boot:
+ *     Whether the macro needs to identify the CPU that is calling it: primary
+ *     CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ *     the platform initialisations, while the secondaries will be put in a
+ *     platform-specific state in the meantime.
+ *
+ *     If the caller knows this macro will only be called by the primary CPU
+ *     then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ *     Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ *     Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ *     Address of the exception vectors to program in the VBAR_EL3 register.
+ * -----------------------------------------------------------------------------
+ */
+       .macro el3_entrypoint_common                                    \
+               _init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,  \
+               _init_memory, _init_c_runtime, _exception_vectors
+
+       .if \_init_sctlr
+               /* -------------------------------------------------------------
+                * This is the initialisation of SCTLR_EL3 and so must ensure
+                * that all fields are explicitly set rather than relying on hw.
+                * Some fields reset to an IMPLEMENTATION DEFINED value and
+                * others are architecturally UNKNOWN on reset.
+                *
+                * SCTLR.EE: Set the CPU endianness before doing anything that
+                *  might involve memory reads or writes. Set to zero to select
+                *  Little Endian.
+                *
+                * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
+                *  force all memory regions that are writeable to be treated as
+                *  XN (Execute-never). Set to zero so that this control has no
+                *  effect on memory access permissions.
+                *
+                * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
+                *
+                * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
+                *
+                * SCTLR.DSSBS: Set to zero to disable speculation store bypass
+                *  safe behaviour upon exception entry to EL3.
+                * -------------------------------------------------------------
+                */
+               mov_imm x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
+                               | SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
+               msr     sctlr_el3, x0
+               isb
+       .endif /* _init_sctlr */
+
+       .if \_warm_boot_mailbox
+               /* -------------------------------------------------------------
+                * This code will be executed for both warm and cold resets.
+                * Now is the time to distinguish between the two.
+                * Query the platform entrypoint address; if it is not zero,
+                * this is a warm boot, so jump to that address.
+                * -------------------------------------------------------------
+                */
+               bl      plat_get_my_entrypoint
+               cbz     x0, do_cold_boot
+               br      x0
+
+       do_cold_boot:
+       .endif /* _warm_boot_mailbox */
+
+       /* ---------------------------------------------------------------------
+        * Set the exception vectors.
+        * ---------------------------------------------------------------------
+        */
+       adr     x0, \_exception_vectors
+       msr     vbar_el3, x0
+       isb
+
+       /* ---------------------------------------------------------------------
+        * It is a cold boot.
+        * Perform any processor specific actions upon reset e.g. cache, TLB
+        * invalidations etc.
+        * ---------------------------------------------------------------------
+        */
+       bl      reset_handler
+
+       el3_arch_init_common
+
+       .if \_secondary_cold_boot
+               /* -------------------------------------------------------------
+                * Check if this is a primary or secondary CPU cold boot.
+                * The primary CPU will set up the platform while the
+                * secondaries are placed in a platform-specific state until the
+                * primary CPU performs the necessary actions to bring them out
+                * of that state and allows entry into the OS.
+                * -------------------------------------------------------------
+                */
+               bl      plat_is_my_cpu_primary
+               cbnz    w0, do_primary_cold_boot
+
+               /* This is a cold boot on a secondary CPU */
+               bl      plat_secondary_cold_boot_setup
+               /* plat_secondary_cold_boot_setup() is not supposed to return */
+               bl      el3_panic
+
+       do_primary_cold_boot:
+       .endif /* _secondary_cold_boot */
+
+       /* ---------------------------------------------------------------------
+        * Initialize memory now. Secondary CPU initialization won't get to this
+        * point.
+        * ---------------------------------------------------------------------
+        */
+
+       .if \_init_memory
+               bl      platform_mem_init
+       .endif /* _init_memory */
+
+       /* ---------------------------------------------------------------------
+        * Init C runtime environment:
+        *   - Zero-initialise the NOBITS sections. There are 2 of them:
+        *       - the .bss section;
+        *       - the coherent memory section (if any).
+        *   - Relocate the data section from ROM to RAM, if required.
+        * ---------------------------------------------------------------------
+        */
+       .if \_init_c_runtime
+#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
+               /* -------------------------------------------------------------
+                * Invalidate the RW memory used by the BL31 image. This
+                * includes the data and NOBITS sections. This is done to
+                * safeguard against possible corruption of this memory by
+                * dirty cache lines in a system cache as a result of use by
+                * an earlier boot loader stage.
+                * -------------------------------------------------------------
+                */
+               adrp    x0, __RW_START__
+               add     x0, x0, :lo12:__RW_START__
+               adrp    x1, __RW_END__
+               add     x1, x1, :lo12:__RW_END__
+               sub     x1, x1, x0
+               bl      inv_dcache_range
+#endif
+               adrp    x0, __BSS_START__
+               add     x0, x0, :lo12:__BSS_START__
+
+               adrp    x1, __BSS_END__
+               add     x1, x1, :lo12:__BSS_END__
+               sub     x1, x1, x0
+               bl      zeromem
+
+#if USE_COHERENT_MEM
+               adrp    x0, __COHERENT_RAM_START__
+               add     x0, x0, :lo12:__COHERENT_RAM_START__
+               adrp    x1, __COHERENT_RAM_END_UNALIGNED__
+               add     x1, x1, :lo12: __COHERENT_RAM_END_UNALIGNED__
+               sub     x1, x1, x0
+               bl      zeromem
+#endif
+
+#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_IN_XIP_MEM)
+               adrp    x0, __DATA_RAM_START__
+               add     x0, x0, :lo12:__DATA_RAM_START__
+               adrp    x1, __DATA_ROM_START__
+               add     x1, x1, :lo12:__DATA_ROM_START__
+               adrp    x2, __DATA_RAM_END__
+               add     x2, x2, :lo12:__DATA_RAM_END__
+               sub     x2, x2, x0
+               bl      memcpy16
+#endif
+       .endif /* _init_c_runtime */
+
+       /* ---------------------------------------------------------------------
+        * Use SP_EL0 for the C runtime stack.
+        * ---------------------------------------------------------------------
+        */
+       msr     spsel, #0
+
+       /* ---------------------------------------------------------------------
+        * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+        * the MMU is enabled. There is no risk of reading stale stack memory
+        * after enabling the MMU as only the primary CPU is running at the
+        * moment.
+        * ---------------------------------------------------------------------
+        */
+       bl      plat_set_my_stack
+
+#if STACK_PROTECTOR_ENABLED
+       .if \_init_c_runtime
+       bl      update_stack_protector_canary
+       .endif /* _init_c_runtime */
+#endif
+       .endm
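+
+/*
+ * Usage sketch (illustrative only): a BL31 entrypoint would typically invoke
+ * the macro along the lines below, with 'runtime_exceptions' standing for its
+ * exception vector table symbol.
+ *
+ *     el3_entrypoint_common                                   \
+ *             _init_sctlr=1                                   \
+ *             _warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS  \
+ *             _secondary_cold_boot=!COLD_BOOT_SINGLE_CPU      \
+ *             _init_memory=1                                  \
+ *             _init_c_runtime=1                               \
+ *             _exception_vectors=runtime_exceptions
+ */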
+
+#endif /* EL3_COMMON_MACROS_S */
diff --git a/include/arch/aarch64/setjmp.h b/include/arch/aarch64/setjmp.h
new file mode 100644 (file)
index 0000000..bbfe1df
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SETJMP_H
+#define SETJMP_H
+
+#define JMP_CTX_X19    0x0
+#define JMP_CTX_X21    0x10
+#define JMP_CTX_X23    0x20
+#define JMP_CTX_X25    0x30
+#define JMP_CTX_X27    0x40
+#define JMP_CTX_X29    0x50
+#define JMP_CTX_SP     0x60
+#define JMP_CTX_END    0x70
+
+#define JMP_SIZE       (JMP_CTX_END >> 3)
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+
+/* Jump buffer hosting the x19 - x30 and sp_el0 registers */
+struct jmpbuf {
+       uint64_t buf[JMP_SIZE];
+} __aligned(16);
+
+
+/*
+ * Set a jump point, and populate the jump buffer with context information so
+ * that a later call to longjmp() can return to it. The caller must adhere to
+ * the following
+ * conditions:
+ *
+ *  - After calling this function, the stack must not be shrunk. The contents of
+ *    the stack must not be changed either.
+ *
+ *  - If the caller were to 'return', the buffer must be considered invalid, and
+ *    must not be used with longjmp().
+ *
+ * The caller will observe this function returning under two distinct
+ * circumstances, each with a different return value:
+ *
+ *  - Zero, when the buffer is set up;
+ *
+ *  - Non-zero, when a call to longjmp() is made (presumably by one of the
+ *    callee functions) with the same jump buffer.
+ */
+int setjmp(struct jmpbuf *buf);
+
+/*
+ * Reset execution to a jump point, restoring the context information from the
+ * jump buffer populated by setjmp().
+ */
+void longjmp(struct jmpbuf *buf);
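+
+/*
+ * Usage sketch (illustrative only): run a hypothetical function that may bail
+ * out via longjmp() on an unrecoverable error deep in its call chain.
+ *
+ *   static struct jmpbuf bail_ctx;
+ *
+ *   if (setjmp(&bail_ctx) == 0) {
+ *           do_work(&bail_ctx);      (may call longjmp(&bail_ctx) to bail out)
+ *   } else {
+ *           (handle the bail-out path here)
+ *   }
+ */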
+
+#endif /* __ASSEMBLY__ */
+#endif /* SETJMP_H */
diff --git a/include/arch/aarch64/smccc_helpers.h b/include/arch/aarch64/smccc_helpers.h
new file mode 100644 (file)
index 0000000..efab18b
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SMCCC_HELPERS_H
+#define SMCCC_HELPERS_H
+
+#include <smccc.h>
+
+#ifndef __ASSEMBLY__
+#include <context.h>
+#include <stdbool.h>
+
+/* Convenience macros to return from SMC handler */
+#define SMC_RET0(_h)   {                                       \
+       return (uint64_t) (_h);                                 \
+}
+#define SMC_RET1(_h, _x0)      {                               \
+       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X0), (_x0));     \
+       SMC_RET0(_h);                                           \
+}
+#define SMC_RET2(_h, _x0, _x1) {                               \
+       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X1), (_x1));     \
+       SMC_RET1(_h, (_x0));                                    \
+}
+#define SMC_RET3(_h, _x0, _x1, _x2)    {                       \
+       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X2), (_x2));     \
+       SMC_RET2(_h, (_x0), (_x1));                             \
+}
+#define SMC_RET4(_h, _x0, _x1, _x2, _x3)       {               \
+       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X3), (_x3));     \
+       SMC_RET3(_h, (_x0), (_x1), (_x2));                      \
+}
+#define SMC_RET5(_h, _x0, _x1, _x2, _x3, _x4)  {               \
+       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X4), (_x4));     \
+       SMC_RET4(_h, (_x0), (_x1), (_x2), (_x3));               \
+}
+#define SMC_RET6(_h, _x0, _x1, _x2, _x3, _x4, _x5)     {       \
+       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X5), (_x5));     \
+       SMC_RET5(_h, (_x0), (_x1), (_x2), (_x3), (_x4));        \
+}
+#define SMC_RET7(_h, _x0, _x1, _x2, _x3, _x4, _x5, _x6)        {       \
+       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X6), (_x6));     \
+       SMC_RET6(_h, (_x0), (_x1), (_x2), (_x3), (_x4), (_x5)); \
+}
+#define SMC_RET8(_h, _x0, _x1, _x2, _x3, _x4, _x5, _x6, _x7) { \
+       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X7), (_x7));     \
+       SMC_RET7(_h, (_x0), (_x1), (_x2), (_x3), (_x4), (_x5), (_x6));  \
+}
+
+/*
+ * Convenience macros to access general purpose registers using the handle
+ * provided to the SMC handler. These take the offset values defined in
+ * context.h.
+ */
+#define SMC_GET_GP(_h, _g)                                     \
+       read_ctx_reg((get_gpregs_ctx(_h)), (_g))
+#define SMC_SET_GP(_h, _g, _v)                                 \
+       write_ctx_reg((get_gpregs_ctx(_h)), (_g), (_v))
+
+/*
+ * Convenience macros to access EL3 context registers using the handle
+ * provided to the SMC handler. These take the offset values defined in
+ * context.h.
+ */
+#define SMC_GET_EL3(_h, _e)                                    \
+       read_ctx_reg((get_el3state_ctx(_h)), (_e))
+#define SMC_SET_EL3(_h, _e, _v)                                        \
+       write_ctx_reg((get_el3state_ctx(_h)), (_e), (_v))
+
+/*
+ * Helper macro to retrieve the SMC parameters from cpu_context_t.
+ */
+#define get_smc_params_from_ctx(_hdl, _x1, _x2, _x3, _x4)      \
+       do {                                                    \
+               const gp_regs_t *regs = get_gpregs_ctx(_hdl);   \
+               _x1 = read_ctx_reg(regs, CTX_GPREG_X1);         \
+               _x2 = read_ctx_reg(regs, CTX_GPREG_X2);         \
+               _x3 = read_ctx_reg(regs, CTX_GPREG_X3);         \
+               _x4 = read_ctx_reg(regs, CTX_GPREG_X4);         \
+       } while (false)
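+
+/*
+ * Usage sketch (illustrative only): the body of a hypothetical SMC handler
+ * retrieving its parameters from the context handle and returning a single
+ * value to the caller. 'foo_smc_handler' and its service logic are assumptions
+ * made for the example; SMC_OK comes from smccc.h.
+ *
+ *   uintptr_t foo_smc_handler(uint32_t smc_fid, u_register_t x1,
+ *                             u_register_t x2, u_register_t x3,
+ *                             u_register_t x4, void *cookie, void *handle,
+ *                             u_register_t flags)
+ *   {
+ *           u_register_t a1, a2, a3, a4;
+ *
+ *           get_smc_params_from_ctx(handle, a1, a2, a3, a4);
+ *           ... service the call using a1-a4 ...
+ *           SMC_RET1(handle, SMC_OK);
+ *   }
+ */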
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* SMCCC_HELPERS_H */
diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S
deleted file mode 100644 (file)
index c54f75c..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef ASM_MACROS_S
-#define ASM_MACROS_S
-
-#include <arch.h>
-#include <asm_macros_common.S>
-#include <spinlock.h>
-
-/*
- * TLBI instruction with type specifier that implements the workaround for
- * errata 813419 of Cortex-A57.
- */
-#if ERRATA_A57_813419
-#define TLB_INVALIDATE(_reg, _coproc) \
-       stcopr  _reg, _coproc; \
-       dsb     ish; \
-       stcopr  _reg, _coproc
-#else
-#define TLB_INVALIDATE(_reg, _coproc) \
-       stcopr  _reg, _coproc
-#endif
-
-#define WORD_SIZE      4
-
-       /*
-        * Co processor register accessors
-        */
-       .macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
-       mrc     \coproc, \opc1, \reg, \CRn, \CRm, \opc2
-       .endm
-
-       .macro ldcopr16 reg1, reg2, coproc, opc1, CRm
-       mrrc    \coproc, \opc1, \reg1, \reg2, \CRm
-       .endm
-
-       .macro stcopr reg, coproc, opc1, CRn, CRm, opc2
-       mcr     \coproc, \opc1, \reg, \CRn, \CRm, \opc2
-       .endm
-
-       .macro stcopr16 reg1, reg2, coproc, opc1, CRm
-       mcrr    \coproc, \opc1, \reg1, \reg2, \CRm
-       .endm
-
-       /* Cache line size helpers */
-       .macro  dcache_line_size  reg, tmp
-       ldcopr  \tmp, CTR
-       ubfx    \tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
-       mov     \reg, #WORD_SIZE
-       lsl     \reg, \reg, \tmp
-       .endm
-
-       .macro  icache_line_size  reg, tmp
-       ldcopr  \tmp, CTR
-       and     \tmp, \tmp, #CTR_IMINLINE_MASK
-       mov     \reg, #WORD_SIZE
-       lsl     \reg, \reg, \tmp
-       .endm
-
-       /*
-        * Declare the exception vector table, enforcing it is aligned on a
-        * 32 byte boundary.
-        */
-       .macro vector_base  label
-       .section .vectors, "ax"
-       .align 5
-       \label:
-       .endm
-
-       /*
-        * This macro calculates the base address of the current CPU's multi
-        * processor(MP) stack using the plat_my_core_pos() index, the name of
-        * the stack storage and the size of each stack.
-        * Out: r0 = physical address of stack base
-        * Clobber: r14, r1, r2
-        */
-       .macro get_my_mp_stack _name, _size
-       bl  plat_my_core_pos
-       ldr r2, =(\_name + \_size)
-       mov r1, #\_size
-       mla r0, r0, r1, r2
-       .endm
-
-       /*
-        * This macro calculates the base address of a uniprocessor(UP) stack
-        * using the name of the stack storage and the size of the stack
-        * Out: r0 = physical address of stack base
-        */
-       .macro get_up_stack _name, _size
-       ldr r0, =(\_name + \_size)
-       .endm
-
-#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
-       /*
-        * ARMv7 cores without Virtualization extension do not support the
-        * eret instruction.
-        */
-       .macro eret
-       movs    pc, lr
-       .endm
-#endif
-
-#if (ARM_ARCH_MAJOR == 7)
-       /* ARMv7 does not support stl instruction */
-       .macro stl _reg, _write_lock
-       dmb
-       str     \_reg, \_write_lock
-       dsb
-       .endm
-#endif
-
-       /*
-        * Helper macro to generate the best mov/movw/movt combinations
-        * according to the value to be moved.
-        */
-       .macro mov_imm _reg, _val
-               .if ((\_val) & 0xffff0000) == 0
-                       mov     \_reg, #(\_val)
-               .else
-                       movw    \_reg, #((\_val) & 0xffff)
-                       movt    \_reg, #((\_val) >> 16)
-               .endif
-       .endm
-
-       /*
-        * Macro to mark instances where we're jumping to a function and don't
-        * expect a return. To provide the function being jumped to with
-        * additional information, we use 'bl' instruction to jump rather than
-        * 'b'.
-        *
-        * Debuggers infer the location of a call from where LR points to, which
-        * is usually the instruction after 'bl'. If this macro expansion
-        * happens to be the last location in a function, that'll cause the LR
-        * to point to a location beyond the function, thereby misleading the
-        * debugger's back trace. We therefore insert a 'nop' after the function
-        * call for debug builds, unless the 'skip_nop' parameter is non-zero.
-        */
-       .macro no_ret _func:req, skip_nop=0
-       bl      \_func
-#if DEBUG
-       .ifeq \skip_nop
-       nop
-       .endif
-#endif
-       .endm
-
-       /*
-        * Reserve space for a spin lock in an assembly file.
-        */
-       .macro define_asm_spinlock _name:req
-       .align  SPINLOCK_ASM_ALIGN
-       \_name:
-       .space  SPINLOCK_ASM_SIZE
-       .endm
-
-       /*
-        * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
-        * and the top 32 bits of `_val` into `_reg_h`.  If either the bottom
-        * or top word of `_val` is zero, the corresponding OR operation
-        * is skipped.
-        */
-       .macro orr64_imm _reg_l, _reg_h, _val
-               .if (\_val >> 32)
-                       orr \_reg_h, \_reg_h, #(\_val >> 32)
-               .endif
-               .if (\_val & 0xffffffff)
-                       orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
-               .endif
-       .endm
-
-       /*
-        * Helper macro to bitwise-clear bits in `_reg_l` and
-        * `_reg_h` given a 64 bit immediate `_val`.  The set bits
-        * in the bottom word of `_val` dictate which bits from
-        * `_reg_l` should be cleared.  Similarly, the set bits in
-        * the top word of `_val` dictate which bits from `_reg_h`
-        * should be cleared.  If either the bottom or top word of
-        * `_val` is zero, the corresponding BIC operation is skipped.
-        */
-       .macro bic64_imm _reg_l, _reg_h, _val
-               .if (\_val >> 32)
-                       bic \_reg_h, \_reg_h, #(\_val >> 32)
-               .endif
-               .if (\_val & 0xffffffff)
-                       bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
-               .endif
-       .endm
-
-#endif /* ASM_MACROS_S */
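
The dcache_line_size and icache_line_size macros above encode a small formula
worth spelling out: CTR.DminLine (bits [19:16]) holds log2 of the minimum
data cache line length in words, so the byte size is WORD_SIZE shifted left
by that field. A C model of the same arithmetic, with the constants restated
locally:

#include <stdint.h>

#define CTR_DMINLINE_SHIFT	16U
#define CTR_DMINLINE_WIDTH	4U
#define WORD_SIZE		4U

/* Byte size of the smallest data cache line, from a raw CTR value. */
static inline uint32_t dcache_line_bytes(uint32_t ctr)
{
	uint32_t dminline = (ctr >> CTR_DMINLINE_SHIFT) &
			    ((1U << CTR_DMINLINE_WIDTH) - 1U);

	return WORD_SIZE << dminline;
}
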
diff --git a/include/common/aarch32/assert_macros.S b/include/common/aarch32/assert_macros.S
deleted file mode 100644 (file)
index ab3a2eb..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef ASSERT_MACROS_S
-#define ASSERT_MACROS_S
-
-       /*
-        * Assembler macro to enable asm_assert. We assume that the stack is
-        * initialized prior to invoking this macro.
-        */
-#define ASM_ASSERT(_cc) \
-.ifndef .L_assert_filename ;\
-       .pushsection .rodata.str1.1, "aS" ;\
-       .L_assert_filename: ;\
-                       .string __FILE__ ;\
-       .popsection ;\
-.endif ;\
-       b##_cc  300f ;\
-       ldr     r0, =.L_assert_filename ;\
-       ldr     r1, =__LINE__ ;\
-       b       asm_assert;\
-300:
-
-#endif /* ASSERT_MACROS_S */
diff --git a/include/common/aarch32/console_macros.S b/include/common/aarch32/console_macros.S
deleted file mode 100644 (file)
index ba6e7d0..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef CONSOLE_MACROS_S
-#define CONSOLE_MACROS_S
-
-#include <console.h>
-
-/*
- * This macro encapsulates the common setup that has to be done at the end of
- * a console driver's register function. It will register all of the driver's
- * callbacks in the console_t structure and initialize the flags field (by
- * default consoles are enabled for the "boot" and "crash" states; this can be
- * changed after registration with the console_set_scope() function). It ends
- * with a tail call to console_register() that returns directly to the caller.
- * REQUIRES a console_t pointer in r0 and a valid return address in lr.
- */
-/*
- * The USE_FINISH_CONSOLE_REG_2 guard is introduced to allow selection between
- * the 2 variants of the finish_console_register macro and will be removed
- * once the deprecated variant is removed.
- */
-#ifndef USE_FINISH_CONSOLE_REG_2
-#if !ERROR_DEPRECATED
-       /* This version of the macro is deprecated. Use the new version */
-       .macro  finish_console_register _driver
-       /*
-        * Add these weak definitions so we will automatically write a 0 if the
-        * function doesn't exist. I'd rather use .ifdef but that only works if
-        * the function was defined (not just declared .global) above this point
-        * in the file, which we can't guarantee.
-        */
-       .weak console_\_driver\()_putc
-       .weak console_\_driver\()_getc
-       .weak console_\_driver\()_flush
-
-       /* Don't use adrp on weak funcs! See GNU ld bugzilla issue 22589. */
-       ldr     r1, =console_\_driver\()_putc
-       str     r1, [r0, #CONSOLE_T_PUTC]
-       ldr     r1, =console_\_driver\()_getc
-       str     r1, [r0, #CONSOLE_T_GETC]
-       ldr     r1, =console_\_driver\()_flush
-       str     r1, [r0, #CONSOLE_T_FLUSH]
-       mov     r1, #(CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH)
-       str     r1, [r0, #CONSOLE_T_FLAGS]
-       b       console_register
-       .endm
-#endif /* ERROR_DEPRECATED */
-#else /* USE_FINISH_CONSOLE_REG_2 */
-       /* The new version of the macro not using weak references */
-       .macro  finish_console_register _driver, putc=0, getc=0, flush=0
-       /*
-        * If any of the callbacks is not specified or is set to 0, then the
-        * corresponding callback entry in console_t is set to 0.
-        */
-       .ifne \putc
-         ldr   r1, =console_\_driver\()_putc
-       .else
-         mov   r1, #0
-       .endif
-       str     r1, [r0, #CONSOLE_T_PUTC]
-
-       .ifne \getc
-         ldr   r1, =console_\_driver\()_getc
-       .else
-         mov   r1, #0
-       .endif
-       str     r1, [r0, #CONSOLE_T_GETC]
-
-       .ifne \flush
-         ldr   r1, =console_\_driver\()_flush
-       .else
-         mov   r1, #0
-       .endif
-       str     r1, [r0, #CONSOLE_T_FLUSH]
-
-       mov     r1, #(CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH)
-       str     r1, [r0, #CONSOLE_T_FLAGS]
-       b       console_register
-       .endm
-#endif /* USE_FINISH_CONSOLE_REG_2 */
-#endif /* CONSOLE_MACROS_S */
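
Both variants of finish_console_register reduce to the same C-level
operation: fill in the console_t callback slots (or leave them empty), set
the default scope flags and tail-call console_register(). The sketch below
restates that logic in C; the struct layout and flag values mirror console.h
but should be treated as illustrative rather than authoritative.

#define CONSOLE_FLAG_BOOT	(1U << 0)	/* values as in console.h */
#define CONSOLE_FLAG_CRASH	(1U << 1)

typedef struct console {
	struct console *next;
	unsigned int flags;
	int (*putc)(int c, struct console *csl);
	int (*getc)(struct console *csl);
	int (*flush)(struct console *csl);
} console_t;	/* field order illustrative */

int console_register(console_t *csl);	/* provided by the framework */

/* Equivalent of finish_console_register with all callbacks supplied. */
static int finish_register(console_t *csl,
			   int (*putc)(int, console_t *),
			   int (*getc)(console_t *),
			   int (*flush)(console_t *))
{
	csl->putc = putc;	/* a NULL here mirrors storing 0 */
	csl->getc = getc;
	csl->flush = flush;
	csl->flags = CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH;
	return console_register(csl);
}
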
diff --git a/include/common/aarch32/el3_common_macros.S b/include/common/aarch32/el3_common_macros.S
deleted file mode 100644 (file)
index 048f161..0000000
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef EL3_COMMON_MACROS_S
-#define EL3_COMMON_MACROS_S
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <assert_macros.S>
-
-       /*
-        * Helper macro to initialise EL3 registers we care about.
-        */
-       .macro el3_arch_init_common
-       /* ---------------------------------------------------------------------
-        * SCTLR has already been initialised - read current value before
-        * modifying.
-        *
-        * SCTLR.I: Enable the instruction cache.
-        *
-        * SCTLR.A: Enable Alignment fault checking. All instructions that load
-        *  or store one or more registers have an alignment check that the
-        *  address being accessed is aligned to the size of the data element(s)
-        *  being accessed.
-        * ---------------------------------------------------------------------
-        */
-       ldr     r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
-       ldcopr  r0, SCTLR
-       orr     r0, r0, r1
-       stcopr  r0, SCTLR
-       isb
-
-       /* ---------------------------------------------------------------------
-        * Initialise SCR, setting all fields rather than relying on the hw.
-        *
-        * SCR.SIF: Enabled so that Secure state instruction fetches from
-        *  Non-secure memory are not permitted.
-        * ---------------------------------------------------------------------
-        */
-       ldr     r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
-       stcopr  r0, SCR
-
-       /* -----------------------------------------------------
-        * Enable asynchronous data aborts now that the
-        * exception vectors have been set up.
-        * -----------------------------------------------------
-        */
-       cpsie   a
-       isb
-
-       /* ---------------------------------------------------------------------
-        * Initialise NSACR, setting all the fields, except for the
-        * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
-        * fields are architecturally UNKNOWN on reset.
-        *
-        * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
-        *  cp11 field is ignored, but is set to the same value as cp10. The
-        *  field is set to allow access to Advanced SIMD and floating point
-        *  features from both Security states.
-        * ---------------------------------------------------------------------
-        */
-       ldcopr  r0, NSACR
-       and     r0, r0, #NSACR_IMP_DEF_MASK
-       orr     r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
-       stcopr  r0, NSACR
-       isb
-
-       /* ---------------------------------------------------------------------
-        * Initialise CPACR, setting all fields rather than relying on hw. Some
-        * fields are architecturally UNKNOWN on reset.
-        *
-        * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
-        *  to trace registers. Set to zero to allow access.
-        *
-        * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
-        *  cp11 field is ignored, but is set to the same value as cp10. The
-        *  field is set to allow full access from PL0 and PL1 to floating-point
-        *  and Advanced SIMD features.
-        * ---------------------------------------------------------------------
-        */
-       ldr     r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
-       stcopr  r0, CPACR
-       isb
-
-       /* ---------------------------------------------------------------------
-        * Initialise FPEXC, setting all fields rather than relying on hw. Some
-        * fields are architecturally UNKNOWN on reset and are set to zero
-        * except for field(s) listed below.
-        *
-        * FPEXC.EN: Enable access to Advanced SIMD and floating point features
-        *  from all exception levels.
-        * ---------------------------------------------------------------------
-        */
-       ldr     r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
-       vmsr    FPEXC, r0
-       isb
-
-#if (ARM_ARCH_MAJOR > 7)
-       /* ---------------------------------------------------------------------
-        * Initialise SDCR, setting all the fields rather than relying on hw.
-        *
-        * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
-        * Secure EL1 are disabled.
-        * ---------------------------------------------------------------------
-        */
-       ldr     r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE))
-       stcopr  r0, SDCR
-#endif
-
-       /*
-        * If Data Independent Timing (DIT) functionality is implemented,
-        * always enable DIT in EL3.
-        */
-       ldcopr  r0, ID_PFR0
-       and     r0, r0, #(ID_PFR0_DIT_MASK << ID_PFR0_DIT_SHIFT)
-       cmp     r0, #ID_PFR0_DIT_SUPPORTED
-       bne     1f
-       mrs     r0, cpsr
-       orr     r0, r0, #CPSR_DIT_BIT
-       msr     cpsr_cxsf, r0
-1:
-       .endm
-
-/* -----------------------------------------------------------------------------
- * This is the super set of actions that need to be performed during a cold boot
- * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
- *
- * This macro will always perform reset handling, architectural initialisations
- * and stack setup. The rest of the actions are optional because they might not
- * be needed, depending on the context in which this macro is called. This is
- * why this macro is parameterised; each parameter allows one of these
- * actions to be enabled or disabled.
- *
- *  _init_sctlr:
- *     Whether the macro needs to initialise the SCTLR register including
- *     configuring the endianness of data accesses.
- *
- *  _warm_boot_mailbox:
- *     Whether the macro needs to detect the type of boot (cold/warm). The
- *     detection is based on the platform entrypoint address: if it is zero
- *     then it is a cold boot, otherwise it is a warm boot. In the latter case,
- *     this macro jumps to the platform entrypoint address.
- *
- *  _secondary_cold_boot:
- *     Whether the macro needs to identify the CPU that is calling it: primary
- *     CPU or secondary CPU. The primary CPU will be allowed to carry on with
- *     the platform initialisations, while the secondaries will be put in a
- *     platform-specific state in the meantime.
- *
- *     If the caller knows this macro will only be called by the primary CPU
- *     then this parameter can be defined to 0 to skip this step.
- *
- * _init_memory:
- *     Whether the macro needs to initialise the memory.
- *
- * _init_c_runtime:
- *     Whether the macro needs to initialise the C runtime environment.
- *
- * _exception_vectors:
- *     Address of the exception vectors to program in the VBAR and MVBAR
- *     registers.
- * -----------------------------------------------------------------------------
- */
-       .macro el3_entrypoint_common                                    \
-               _init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,  \
-               _init_memory, _init_c_runtime, _exception_vectors
-
-       /* Make sure we are in Secure Mode */
-#if ENABLE_ASSERTIONS
-       ldcopr  r0, SCR
-       tst     r0, #SCR_NS_BIT
-       ASM_ASSERT(eq)
-#endif
-
-       .if \_init_sctlr
-               /* -------------------------------------------------------------
-                * This is the initialisation of SCTLR and so must ensure that
-                * all fields are explicitly set rather than relying on hw. Some
-                * fields reset to an IMPLEMENTATION DEFINED value.
-                *
-                * SCTLR.TE: Set to zero so that exceptions to an Exception
-                *  Level executing at PL1 are taken to A32 state.
-                *
-                * SCTLR.EE: Set the CPU endianness before doing anything that
-                *  might involve memory reads or writes. Set to zero to select
-                *  Little Endian.
-                *
-                * SCTLR.V: Set to zero to select the normal exception vectors
-                *  with base address held in VBAR.
-                *
-                * SCTLR.DSSBS: Set to zero to disable speculation store bypass
-                *  safe behaviour upon exception entry to EL3.
-                * -------------------------------------------------------------
-                */
-               ldr     r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
-                               SCTLR_V_BIT | SCTLR_DSSBS_BIT))
-               stcopr  r0, SCTLR
-               isb
-       .endif /* _init_sctlr */
-
-       /* Switch to monitor mode */
-       cps     #MODE32_mon
-       isb
-
-       .if \_warm_boot_mailbox
-               /* -------------------------------------------------------------
-                * This code will be executed for both warm and cold resets.
-                * Now is the time to distinguish between the two.
-                * Query the platform entrypoint address and if it is not zero
-                * then it means it is a warm boot so jump to this address.
-                * -------------------------------------------------------------
-                */
-               bl      plat_get_my_entrypoint
-               cmp     r0, #0
-               bxne    r0
-       .endif /* _warm_boot_mailbox */
-
-       /* ---------------------------------------------------------------------
-        * Set the exception vectors (VBAR/MVBAR).
-        * ---------------------------------------------------------------------
-        */
-       ldr     r0, =\_exception_vectors
-       stcopr  r0, VBAR
-       stcopr  r0, MVBAR
-       isb
-
-       /* ---------------------------------------------------------------------
-        * It is a cold boot.
-        * Perform any processor specific actions upon reset e.g. cache, TLB
-        * invalidations etc.
-        * ---------------------------------------------------------------------
-        */
-       bl      reset_handler
-
-       el3_arch_init_common
-
-       .if \_secondary_cold_boot
-               /* -------------------------------------------------------------
-                * Check if this is a primary or secondary CPU cold boot.
-                * The primary CPU will set up the platform while the
-                * secondaries are placed in a platform-specific state until the
-                * primary CPU performs the necessary actions to bring them out
-                * of that state and allows entry into the OS.
-                * -------------------------------------------------------------
-                */
-               bl      plat_is_my_cpu_primary
-               cmp     r0, #0
-               bne     do_primary_cold_boot
-
-               /* This is a cold boot on a secondary CPU */
-               bl      plat_secondary_cold_boot_setup
-               /* plat_secondary_cold_boot_setup() is not supposed to return */
-               no_ret  plat_panic_handler
-
-       do_primary_cold_boot:
-       .endif /* _secondary_cold_boot */
-
-       /* ---------------------------------------------------------------------
-        * Initialize memory now. Secondary CPU initialization won't get to this
-        * point.
-        * ---------------------------------------------------------------------
-        */
-
-       .if \_init_memory
-               bl      platform_mem_init
-       .endif /* _init_memory */
-
-       /* ---------------------------------------------------------------------
-        * Init C runtime environment:
-        *   - Zero-initialise the NOBITS sections. There are 2 of them:
-        *       - the .bss section;
-        *       - the coherent memory section (if any).
-        *   - Relocate the data section from ROM to RAM, if required.
-        * ---------------------------------------------------------------------
-        */
-       .if \_init_c_runtime
-#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
-               /* -----------------------------------------------------------------
-                * Invalidate the RW memory used by the image. This
-                * includes the data and NOBITS sections. This is done to
-                * safeguard against possible corruption of this memory by
-                * dirty cache lines in a system cache as a result of use by
-                * an earlier boot loader stage.
-                * -----------------------------------------------------------------
-                */
-               ldr     r0, =__RW_START__
-               ldr     r1, =__RW_END__
-               sub     r1, r1, r0
-               bl      inv_dcache_range
-#endif
-
-               ldr     r0, =__BSS_START__
-               ldr     r1, =__BSS_SIZE__
-               bl      zeromem
-
-#if USE_COHERENT_MEM
-               ldr     r0, =__COHERENT_RAM_START__
-               ldr     r1, =__COHERENT_RAM_UNALIGNED_SIZE__
-               bl      zeromem
-#endif
-
-#ifdef IMAGE_BL1
-               /* -----------------------------------------------------
-                * Copy data from ROM to RAM.
-                * -----------------------------------------------------
-                */
-               ldr     r0, =__DATA_RAM_START__
-               ldr     r1, =__DATA_ROM_START__
-               ldr     r2, =__DATA_SIZE__
-               bl      memcpy4
-#endif
-       .endif /* _init_c_runtime */
-
-       /* ---------------------------------------------------------------------
-        * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
-        * the MMU is enabled. There is no risk of reading stale stack memory
-        * after enabling the MMU as only the primary CPU is running at the
-        * moment.
-        * ---------------------------------------------------------------------
-        */
-       bl      plat_set_my_stack
-
-#if STACK_PROTECTOR_ENABLED
-       .if \_init_c_runtime
-       bl      update_stack_protector_canary
-       .endif /* _init_c_runtime */
-#endif
-       .endm
-
-#endif /* EL3_COMMON_MACROS_S */
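
The _warm_boot_mailbox step is the only control-flow subtlety in
el3_entrypoint_common. Expressed as C pseudocode (a sketch; only
plat_get_my_entrypoint() is a real platform hook), it reads:

#include <stdint.h>

uintptr_t plat_get_my_entrypoint(void);	/* platform hook */

static void warm_boot_mailbox_check(void)
{
	uintptr_t ep = plat_get_my_entrypoint();

	if (ep != 0U) {
		/* Warm boot: jump straight to the stored entrypoint. */
		((void (*)(void))ep)();	/* never returns here */
	}

	/* Cold boot: fall through to reset_handler and the rest. */
}
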
diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S
deleted file mode 100644 (file)
index dea3021..0000000
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef ASM_MACROS_S
-#define ASM_MACROS_S
-
-#include <arch.h>
-#include <asm_macros_common.S>
-#include <spinlock.h>
-
-/*
- * TLBI instruction with type specifier that implements the workaround for
- * errata 813419 of Cortex-A57.
- */
-#if ERRATA_A57_813419
-#define TLB_INVALIDATE(_type) \
-       tlbi    _type; \
-       dsb     ish; \
-       tlbi    _type
-#else
-#define TLB_INVALIDATE(_type) \
-       tlbi    _type
-#endif
-
-
-       .macro  func_prologue
-       stp     x29, x30, [sp, #-0x10]!
-       mov     x29, sp
-       .endm
-
-       .macro  func_epilogue
-       ldp     x29, x30, [sp], #0x10
-       .endm
-
-
-       .macro  dcache_line_size  reg, tmp
-       mrs     \tmp, ctr_el0
-       ubfx    \tmp, \tmp, #16, #4
-       mov     \reg, #4
-       lsl     \reg, \reg, \tmp
-       .endm
-
-
-       .macro  icache_line_size  reg, tmp
-       mrs     \tmp, ctr_el0
-       and     \tmp, \tmp, #0xf
-       mov     \reg, #4
-       lsl     \reg, \reg, \tmp
-       .endm
-
-
-       .macro  smc_check  label
-       mrs     x0, esr_el3
-       ubfx    x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
-       cmp     x0, #EC_AARCH64_SMC
-       b.ne    $label
-       .endm
-
-       /*
-        * Declare the exception vector table, enforcing it is aligned on a
-        * 2KB boundary, as required by the ARMv8 architecture.
-        * Use zero bytes as the fill value of the padding bytes so that the
-        * padding decodes as illegal AArch64 instructions. This increases
-        * security, robustness and potentially facilitates debugging.
-        */
-       .macro vector_base  label, section_name=.vectors
-       .section \section_name, "ax"
-       .align 11, 0
-       \label:
-       .endm
-
-       /*
-        * Create an entry in the exception vector table, enforcing it is
-        * aligned on a 128-byte boundary, as required by the ARMv8 architecture.
-        * Use zero bytes as the fill value of the padding bytes so that the
-        * padding decodes as illegal AArch64 instructions. This increases
-        * security, robustness and potentially facilitates debugging.
-        */
-       .macro vector_entry  label, section_name=.vectors
-       .cfi_sections .debug_frame
-       .section \section_name, "ax"
-       .align 7, 0
-       .type \label, %function
-       .cfi_startproc
-       \label:
-       .endm
-
-       /*
-        * Pad the exception vector entry to its full size, which is always
-        * 32 instructions. If there are more than 32 instructions in the
-        * exception vector then an error is emitted.
-        */
-       .macro end_vector_entry label
-       .cfi_endproc
-       .fill   \label + (32 * 4) - .
-       .endm
-
-       /*
-        * This macro calculates the base address of the current CPU's MP stack
-        * using the plat_my_core_pos() index, the name of the stack storage
-        * and the size of each stack.
-        * Out: X0 = physical address of stack base
-        * Clobber: X30, X1, X2
-        */
-       .macro get_my_mp_stack _name, _size
-       bl      plat_my_core_pos
-       adrp    x2, (\_name + \_size)
-       add     x2, x2, :lo12:(\_name + \_size)
-       mov     x1, #\_size
-       madd    x0, x0, x1, x2
-       .endm
-
-       /*
-        * This macro calculates the base address of a UP stack using the
-        * name of the stack storage and the size of the stack.
-        * Out: X0 = physical address of stack base
-        */
-       .macro get_up_stack _name, _size
-       adrp    x0, (\_name + \_size)
-       add     x0, x0, :lo12:(\_name + \_size)
-       .endm
-
-       /*
-        * Helper macro to generate the best mov/movk combinations according
-        * to the value to be moved. The 16 bits from '_shift' are tested and
-        * if not zero, they are moved into '_reg' without affecting
-        * other bits.
-        */
-       .macro _mov_imm16 _reg, _val, _shift
-               .if (\_val >> \_shift) & 0xffff
-                       .if (\_val & (1 << \_shift - 1))
-                               movk    \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
-                       .else
-                               mov     \_reg, \_val & (0xffff << \_shift)
-                       .endif
-               .endif
-       .endm
-
-       /*
-        * Helper macro to load arbitrary values into 32 or 64-bit registers,
-        * generating the best mov/movk combinations. Many base addresses are
-        * 64KB aligned, in which case the macro avoids updating bits 15:0
-        * altogether.
-        */
-       .macro mov_imm _reg, _val
-               .if (\_val) == 0
-                       mov     \_reg, #0
-               .else
-                       _mov_imm16      \_reg, (\_val), 0
-                       _mov_imm16      \_reg, (\_val), 16
-                       _mov_imm16      \_reg, (\_val), 32
-                       _mov_imm16      \_reg, (\_val), 48
-               .endif
-       .endm
-
-       /*
-        * Macro to mark instances where we're jumping to a function and don't
-        * expect a return. To provide the function being jumped to with
-        * additional information, we use 'bl' instruction to jump rather than
-        * 'b'.
-        *
-        * Debuggers infer the location of a call from where LR points to, which
-        * is usually the instruction after 'bl'. If this macro expansion
-        * happens to be the last location in a function, that'll cause the LR
-        * to point to a location beyond the function, thereby misleading the
-        * debugger's back trace. We therefore insert a 'nop' after the function
-        * call for debug builds, unless the 'skip_nop' parameter is non-zero.
-        */
-       .macro no_ret _func:req, skip_nop=0
-       bl      \_func
-#if DEBUG
-       .ifeq \skip_nop
-       nop
-       .endif
-#endif
-       .endm
-
-       /*
-        * Reserve space for a spin lock in an assembly file.
-        */
-       .macro define_asm_spinlock _name:req
-       .align  SPINLOCK_ASM_ALIGN
-       \_name:
-       .space  SPINLOCK_ASM_SIZE
-       .endm
-
-#if RAS_EXTENSION
-       .macro esb
-       .inst   0xd503221f
-       .endm
-#endif
-
-#endif /* ASM_MACROS_S */
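
The _mov_imm16/mov_imm pair above implements a simple cost rule: one
instruction per non-zero 16-bit chunk of the immediate (a mov for the first
chunk, a movk for each subsequent one), which is also why 64KB-aligned
addresses skip the bits 15:0 update. A small C model of that count, purely
for illustration:

#include <stdint.h>

/* Number of mov/movk instructions mov_imm emits for a given value. */
static unsigned int mov_imm_insn_count(uint64_t val)
{
	unsigned int count = 0U;

	if (val == 0U)
		return 1U;	/* single "mov reg, #0" */

	for (unsigned int shift = 0U; shift < 64U; shift += 16U) {
		if (((val >> shift) & 0xffffU) != 0U)
			count++;
	}

	return count;
}
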
diff --git a/include/common/aarch64/assert_macros.S b/include/common/aarch64/assert_macros.S
deleted file mode 100644 (file)
index 06371c4..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef ASSERT_MACROS_S
-#define ASSERT_MACROS_S
-
-       /*
-        * Assembler macro to enable asm_assert. Use this macro wherever
-        * an assert is required in assembly. Note that the macro makes use
-        * of the label '300' internally, so the caller must ensure that this
-        * label is not the target of a branch prior to invoking this macro.
-        */
-#define ASM_ASSERT(_cc) \
-.ifndef .L_assert_filename ;\
-       .pushsection .rodata.str1.1, "aS" ;\
-       .L_assert_filename: ;\
-                       .string __FILE__ ;\
-       .popsection ;\
-.endif ;\
-       b._cc   300f ;\
-       adr     x0, .L_assert_filename ;\
-       mov     x1, __LINE__ ;\
-       b       asm_assert ;\
-300:
-
-#endif /* ASSERT_MACROS_S */
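
In C terms the expansion behaves like the assert wrapper below. The
asm_assert() prototype is inferred from the register usage above (x0 = file
name, x1 = line number) and should be treated as an assumption, not as a
declaration this codebase provides.

#include <stdbool.h>

void asm_assert(const char *file, unsigned int line);	/* assumed prototype */

/* What ASM_ASSERT(_cc) amounts to when condition _cc does not hold. */
#define C_ASM_ASSERT(cond)				\
	do {						\
		if (!(cond))				\
			asm_assert(__FILE__, __LINE__);	\
	} while (false)
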
diff --git a/include/common/aarch64/console_macros.S b/include/common/aarch64/console_macros.S
deleted file mode 100644 (file)
index 5c88d4f..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef CONSOLE_MACROS_S
-#define CONSOLE_MACROS_S
-
-#include <console.h>
-
-/*
- * This macro encapsulates the common setup that has to be done at the end of
- * a console driver's register function. It will register all of the driver's
- * callbacks in the console_t structure and initialize the flags field (by
- * default consoles are enabled for the "boot" and "crash" states; this can be
- * changed after registration with the console_set_scope() function). It ends
- * with a tail call to console_register() that returns directly to the caller.
- * REQUIRES a console_t pointer in x0 and a valid return address in x30.
- */
-/*
- * The USE_FINISH_CONSOLE_REG_2 guard is introduced to allow selection between
- * the 2 variants of the finish_console_register macro and will be removed
- * once the deprecated variant is removed.
- */
-#ifndef USE_FINISH_CONSOLE_REG_2
-#if !ERROR_DEPRECATED
-       /* This version of the macro is deprecated. Use the new version */
-       .macro  finish_console_register _driver
-       /*
-        * Add these weak definitions so we will automatically write a 0 if the
-        * function doesn't exist. I'd rather use .ifdef but that only works if
-        * the function was defined (not just declared .global) above this point
-        * in the file, which we can't guarantee.
-        */
-       .weak console_\_driver\()_putc
-       .weak console_\_driver\()_getc
-       .weak console_\_driver\()_flush
-
-       /* Don't use adrp on weak funcs! See GNU ld bugzilla issue 22589. */
-       ldr     x1, =console_\_driver\()_putc
-       str     x1, [x0, #CONSOLE_T_PUTC]
-       ldr     x1, =console_\_driver\()_getc
-       str     x1, [x0, #CONSOLE_T_GETC]
-       ldr     x1, =console_\_driver\()_flush
-       str     x1, [x0, #CONSOLE_T_FLUSH]
-       mov     x1, #(CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH)
-       str     x1, [x0, #CONSOLE_T_FLAGS]
-       b       console_register
-       .endm
-#endif /* ERROR_DEPRECATED */
-#else /* USE_FINISH_CONSOLE_REG_2 */
-       /* The new version of the macro not using weak references */
-       .macro  finish_console_register _driver, putc=0, getc=0, flush=0
-       /*
-        * If any of the callbacks is not specified or is set to 0, then the
-        * corresponding callback entry in console_t is set to 0.
-        */
-       .ifne \putc
-         adrp  x1, console_\_driver\()_putc
-         add   x1, x1, :lo12:console_\_driver\()_putc
-         str   x1, [x0, #CONSOLE_T_PUTC]
-       .else
-         str   xzr, [x0, #CONSOLE_T_PUTC]
-       .endif
-
-       .ifne \getc
-         adrp  x1, console_\_driver\()_getc
-         add   x1, x1, :lo12:console_\_driver\()_getc
-         str   x1, [x0, #CONSOLE_T_GETC]
-       .else
-         str   xzr, [x0, #CONSOLE_T_GETC]
-       .endif
-
-       .ifne \flush
-         adrp  x1, console_\_driver\()_flush
-         add   x1, x1, :lo12:console_\_driver\()_flush
-         str   x1, [x0, #CONSOLE_T_FLUSH]
-       .else
-         str   xzr, [x0, #CONSOLE_T_FLUSH]
-       .endif
-
-       mov     x1, #(CONSOLE_FLAG_BOOT | CONSOLE_FLAG_CRASH)
-       str     x1, [x0, #CONSOLE_T_FLAGS]
-       b       console_register
-       .endm
-#endif /* USE_FINISH_CONSOLE_REG_2 */
-
-#endif /* CONSOLE_MACROS_S */
diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S
deleted file mode 100644 (file)
index 410aeab..0000000
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef EL3_COMMON_MACROS_S
-#define EL3_COMMON_MACROS_S
-
-#include <arch.h>
-#include <asm_macros.S>
-
-       /*
-        * Helper macro to initialise EL3 registers we care about.
-        */
-       .macro el3_arch_init_common
-       /* ---------------------------------------------------------------------
-        * SCTLR_EL3 has already been initialised - read current value before
-        * modifying.
-        *
-        * SCTLR_EL3.I: Enable the instruction cache.
-        *
-        * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
-        *  exception is generated if a load or store instruction executed at
-        *  EL3 uses the SP as the base address and the SP is not aligned to a
-        *  16-byte boundary.
-        *
-        * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
-        *  load or store one or more registers have an alignment check that the
-        *  address being accessed is aligned to the size of the data element(s)
-        *  being accessed.
-        * ---------------------------------------------------------------------
-        */
-       mov     x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
-       mrs     x0, sctlr_el3
-       orr     x0, x0, x1
-       msr     sctlr_el3, x0
-       isb
-
-#ifdef IMAGE_BL31
-       /* ---------------------------------------------------------------------
-        * Initialise the per-cpu cache pointer to the CPU.
-        * This is done early to enable crash reporting to have access to crash
-        * stack. Since crash reporting depends on cpu_data to report the
-        * unhandled exception, not doing so can lead to recursive exceptions
-        * due to a NULL TPIDR_EL3.
-        * ---------------------------------------------------------------------
-        */
-       bl      init_cpu_data_ptr
-#endif /* IMAGE_BL31 */
-
-       /* ---------------------------------------------------------------------
-        * Initialise SCR_EL3, setting all fields rather than relying on hw.
-        * All fields are architecturally UNKNOWN on reset. The following fields
-        * do not change during the TF lifetime. The remaining fields are set to
-        * zero here but are updated ahead of transitioning to a lower EL in the
-        * function cm_init_context_common().
-        *
-        * SCR_EL3.TWE: Set to zero so that execution of WFE instructions at
-        *  EL2, EL1 and EL0 are not trapped to EL3.
-        *
-        * SCR_EL3.TWI: Set to zero so that execution of WFI instructions at
-        *  EL2, EL1 and EL0 are not trapped to EL3.
-        *
-        * SCR_EL3.SIF: Set to one to disable instruction fetches from
-        *  Non-secure memory.
-        *
-        * SCR_EL3.SMD: Set to zero to enable SMC calls at EL1 and above, from
-        *  both Security states and both Execution states.
-        *
-        * SCR_EL3.EA: Set to one to route External Aborts and SError Interrupts
-        *  to EL3 when executing at any EL.
-        *
-        * SCR_EL3.{API,APK}: For Armv8.3 pointer authentication feature,
-        * disable traps to EL3 when accessing key registers or using pointer
-        * authentication instructions from lower ELs.
-        * ---------------------------------------------------------------------
-        */
-       mov_imm x0, ((SCR_RESET_VAL | SCR_EA_BIT | SCR_SIF_BIT | \
-                               SCR_API_BIT | SCR_APK_BIT) \
-                       & ~(SCR_TWE_BIT | SCR_TWI_BIT | SCR_SMD_BIT))
-       msr     scr_el3, x0
-
-       /* ---------------------------------------------------------------------
-        * Initialise MDCR_EL3, setting all fields rather than relying on hw.
-        * Some fields are architecturally UNKNOWN on reset.
-        *
-        * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
-        *  Debug exceptions, other than Breakpoint Instruction exceptions, are
-        *  disabled from all ELs in Secure state.
-        *
-        * MDCR_EL3.SPD32: Set to 0b10 to disable AArch32 Secure self-hosted
-        *  privileged debug from S-EL1.
-        *
-        * MDCR_EL3.TDOSA: Set to zero so that EL2 and EL1 System register
-        *  accesses to the powerdown debug registers do not trap to EL3.
-        *
-        * MDCR_EL3.TDA: Set to zero to allow EL0, EL1 and EL2 access to the
-        *  debug registers, other than those registers that are controlled by
-        *  MDCR_EL3.TDOSA.
-        *
-        * MDCR_EL3.TPM: Set to zero so that EL0, EL1, and EL2 System register
-        *  accesses to all Performance Monitors registers do not trap to EL3.
-        * ---------------------------------------------------------------------
-        */
-       mov_imm x0, ((MDCR_EL3_RESET_VAL | MDCR_SDD_BIT | MDCR_SPD32(MDCR_SPD32_DISABLE)) \
-                       & ~(MDCR_TDOSA_BIT | MDCR_TDA_BIT | MDCR_TPM_BIT))
-       msr     mdcr_el3, x0
-
-       /* ---------------------------------------------------------------------
-        * Enable External Aborts and SError Interrupts now that the exception
-        * vectors have been set up.
-        * ---------------------------------------------------------------------
-        */
-       msr     daifclr, #DAIF_ABT_BIT
-
-       /* ---------------------------------------------------------------------
-        * Initialise CPTR_EL3, setting all fields rather than relying on hw.
-        * All fields are architecturally UNKNOWN on reset.
-        *
-        * CPTR_EL3.TCPAC: Set to zero so that any accesses to CPACR_EL1,
-        *  CPTR_EL2, CPACR, or HCPTR do not trap to EL3.
-        *
-        * CPTR_EL3.TTA: Set to zero so that System register accesses to the
-        *  trace registers do not trap to EL3.
-        *
-        * CPTR_EL3.TFP: Set to zero so that accesses to the V- or Z- registers
-        *  by Advanced SIMD, floating-point or SVE instructions (if implemented)
-        *  do not trap to EL3.
-        */
-       mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
-       msr     cptr_el3, x0
-
-       /*
-        * If Data Independent Timing (DIT) functionality is implemented,
-        * always enable DIT in EL3.
-        */
-       mrs     x0, id_aa64pfr0_el1
-       ubfx    x0, x0, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
-       cmp     x0, #ID_AA64PFR0_DIT_SUPPORTED
-       bne     1f
-       mov     x0, #DIT_BIT
-       msr     DIT, x0
-1:
-       .endm
-
-/* -----------------------------------------------------------------------------
- * This is the super set of actions that need to be performed during a cold boot
- * or a warm boot in EL3. This code is shared by BL1 and BL31.
- *
- * This macro will always perform reset handling, architectural initialisations
- * and stack setup. The rest of the actions are optional because they might not
- * be needed, depending on the context in which this macro is called. This is
- * why this macro is parameterised; each parameter allows one of these
- * actions to be enabled or disabled.
- *
- *  _init_sctlr:
- *     Whether the macro needs to initialise SCTLR_EL3, including configuring
- *     the endianness of data accesses.
- *
- *  _warm_boot_mailbox:
- *     Whether the macro needs to detect the type of boot (cold/warm). The
- *     detection is based on the platform entrypoint address: if it is zero
- *     then it is a cold boot, otherwise it is a warm boot. In the latter case,
- *     this macro jumps to the platform entrypoint address.
- *
- *  _secondary_cold_boot:
- *     Whether the macro needs to identify the CPU that is calling it: primary
- *     CPU or secondary CPU. The primary CPU will be allowed to carry on with
- *     the platform initialisations, while the secondaries will be put in a
- *     platform-specific state in the meantime.
- *
- *     If the caller knows this macro will only be called by the primary CPU
- *     then this parameter can be defined to 0 to skip this step.
- *
- * _init_memory:
- *     Whether the macro needs to initialise the memory.
- *
- * _init_c_runtime:
- *     Whether the macro needs to initialise the C runtime environment.
- *
- * _exception_vectors:
- *     Address of the exception vectors to program in the VBAR_EL3 register.
- * -----------------------------------------------------------------------------
- */
-       .macro el3_entrypoint_common                                    \
-               _init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,  \
-               _init_memory, _init_c_runtime, _exception_vectors
-
-       .if \_init_sctlr
-               /* -------------------------------------------------------------
-                * This is the initialisation of SCTLR_EL3 and so must ensure
-                * that all fields are explicitly set rather than relying on hw.
-                * Some fields reset to an IMPLEMENTATION DEFINED value and
-                * others are architecturally UNKNOWN on reset.
-                *
-                * SCTLR.EE: Set the CPU endianness before doing anything that
-                *  might involve memory reads or writes. Set to zero to select
-                *  Little Endian.
-                *
-                * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
-                *  force all memory regions that are writeable to be treated as
-                *  XN (Execute-never). Set to zero so that this control has no
-                *  effect on memory access permissions.
-                *
-                * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
-                *
-                * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
-                *
-                * SCTLR.DSSBS: Set to zero to disable speculation store bypass
-                *  safe behaviour upon exception entry to EL3.
-                * -------------------------------------------------------------
-                */
-               mov_imm x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
-                               | SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
-               msr     sctlr_el3, x0
-               isb
-       .endif /* _init_sctlr */
-
-       .if \_warm_boot_mailbox
-               /* -------------------------------------------------------------
-                * This code will be executed for both warm and cold resets.
-                * Now is the time to distinguish between the two.
-                * Query the platform entrypoint address and if it is not zero
-                * then it means it is a warm boot so jump to this address.
-                * -------------------------------------------------------------
-                */
-               bl      plat_get_my_entrypoint
-               cbz     x0, do_cold_boot
-               br      x0
-
-       do_cold_boot:
-       .endif /* _warm_boot_mailbox */
-
-       /* ---------------------------------------------------------------------
-        * Set the exception vectors.
-        * ---------------------------------------------------------------------
-        */
-       adr     x0, \_exception_vectors
-       msr     vbar_el3, x0
-       isb
-
-       /* ---------------------------------------------------------------------
-        * It is a cold boot.
-        * Perform any processor specific actions upon reset e.g. cache, TLB
-        * invalidations etc.
-        * ---------------------------------------------------------------------
-        */
-       bl      reset_handler
-
-       el3_arch_init_common
-
-       .if \_secondary_cold_boot
-               /* -------------------------------------------------------------
-                * Check if this is a primary or secondary CPU cold boot.
-                * The primary CPU will set up the platform while the
-                * secondaries are placed in a platform-specific state until the
-                * primary CPU performs the necessary actions to bring them out
-                * of that state and allows entry into the OS.
-                * -------------------------------------------------------------
-                */
-               bl      plat_is_my_cpu_primary
-               cbnz    w0, do_primary_cold_boot
-
-               /* This is a cold boot on a secondary CPU */
-               bl      plat_secondary_cold_boot_setup
-               /* plat_secondary_cold_boot_setup() is not supposed to return */
-               bl      el3_panic
-
-       do_primary_cold_boot:
-       .endif /* _secondary_cold_boot */
-
-       /* ---------------------------------------------------------------------
-        * Initialize memory now. Secondary CPU initialization won't get to this
-        * point.
-        * ---------------------------------------------------------------------
-        */
-
-       .if \_init_memory
-               bl      platform_mem_init
-       .endif /* _init_memory */
-
-       /* ---------------------------------------------------------------------
-        * Init C runtime environment:
-        *   - Zero-initialise the NOBITS sections. There are 2 of them:
-        *       - the .bss section;
-        *       - the coherent memory section (if any).
-        *   - Relocate the data section from ROM to RAM, if required.
-        * ---------------------------------------------------------------------
-        */
-       .if \_init_c_runtime
-#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
-               /* -------------------------------------------------------------
-                * Invalidate the RW memory used by the image. This
-                * includes the data and NOBITS sections. This is done to
-                * safeguard against possible corruption of this memory by
-                * dirty cache lines in a system cache as a result of use by
-                * an earlier boot loader stage.
-                * -------------------------------------------------------------
-                */
-               adrp    x0, __RW_START__
-               add     x0, x0, :lo12:__RW_START__
-               adrp    x1, __RW_END__
-               add     x1, x1, :lo12:__RW_END__
-               sub     x1, x1, x0
-               bl      inv_dcache_range
-#endif
-               adrp    x0, __BSS_START__
-               add     x0, x0, :lo12:__BSS_START__
-
-               adrp    x1, __BSS_END__
-               add     x1, x1, :lo12:__BSS_END__
-               sub     x1, x1, x0
-               bl      zeromem
-
-#if USE_COHERENT_MEM
-               adrp    x0, __COHERENT_RAM_START__
-               add     x0, x0, :lo12:__COHERENT_RAM_START__
-               adrp    x1, __COHERENT_RAM_END_UNALIGNED__
-               add     x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
-               sub     x1, x1, x0
-               bl      zeromem
-#endif
-
-#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_IN_XIP_MEM)
-               adrp    x0, __DATA_RAM_START__
-               add     x0, x0, :lo12:__DATA_RAM_START__
-               adrp    x1, __DATA_ROM_START__
-               add     x1, x1, :lo12:__DATA_ROM_START__
-               adrp    x2, __DATA_RAM_END__
-               add     x2, x2, :lo12:__DATA_RAM_END__
-               sub     x2, x2, x0
-               bl      memcpy16
-#endif
-       .endif /* _init_c_runtime */
-
-       /* ---------------------------------------------------------------------
-        * Use SP_EL0 for the C runtime stack.
-        * ---------------------------------------------------------------------
-        */
-       msr     spsel, #0
-
-       /* ---------------------------------------------------------------------
-        * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
-        * the MMU is enabled. There is no risk of reading stale stack memory
-        * after enabling the MMU as only the primary CPU is running at the
-        * moment.
-        * ---------------------------------------------------------------------
-        */
-       bl      plat_set_my_stack
-
-#if STACK_PROTECTOR_ENABLED
-       .if \_init_c_runtime
-       bl      update_stack_protector_canary
-       .endif /* _init_c_runtime */
-#endif
-       .endm
-
-#endif /* EL3_COMMON_MACROS_S */
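
The DIT sequence at the end of el3_arch_init_common follows the usual
feature-detection pattern: read the ID register, extract the field, compare
against the "supported" value and only then touch PSTATE. As a C sketch
(the register read is modelled as a parameter; shift and mask values match
the ID_AA64PFR0_* definitions):

#include <stdbool.h>
#include <stdint.h>

#define ID_AA64PFR0_DIT_SHIFT		48U	/* as in arch.h */
#define ID_AA64PFR0_DIT_MASK		0xfULL
#define ID_AA64PFR0_DIT_SUPPORTED	1ULL

static bool dit_supported(uint64_t id_aa64pfr0)
{
	uint64_t dit = (id_aa64pfr0 >> ID_AA64PFR0_DIT_SHIFT) &
		       ID_AA64PFR0_DIT_MASK;

	return dit == ID_AA64PFR0_DIT_SUPPORTED;
}
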
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
deleted file mode 100644 (file)
index 8260c54..0000000
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ARCH_H
-#define ARCH_H
-
-#include <utils_def.h>
-
-/*******************************************************************************
- * MIDR bit definitions
- ******************************************************************************/
-#define MIDR_IMPL_MASK         U(0xff)
-#define MIDR_IMPL_SHIFT                U(24)
-#define MIDR_VAR_SHIFT         U(20)
-#define MIDR_VAR_BITS          U(4)
-#define MIDR_REV_SHIFT         U(0)
-#define MIDR_REV_BITS          U(4)
-#define MIDR_PN_MASK           U(0xfff)
-#define MIDR_PN_SHIFT          U(4)
-
-/*******************************************************************************
- * MPIDR macros
- ******************************************************************************/
-#define MPIDR_MT_MASK          (U(1) << 24)
-#define MPIDR_CPU_MASK         MPIDR_AFFLVL_MASK
-#define MPIDR_CLUSTER_MASK     (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
-#define MPIDR_AFFINITY_BITS    U(8)
-#define MPIDR_AFFLVL_MASK      U(0xff)
-#define MPIDR_AFFLVL_SHIFT     U(3)
-#define MPIDR_AFF0_SHIFT       U(0)
-#define MPIDR_AFF1_SHIFT       U(8)
-#define MPIDR_AFF2_SHIFT       U(16)
-#define MPIDR_AFF_SHIFT(_n)    MPIDR_AFF##_n##_SHIFT
-#define MPIDR_AFFINITY_MASK    U(0x00ffffff)
-#define MPIDR_AFFLVL0          U(0)
-#define MPIDR_AFFLVL1          U(1)
-#define MPIDR_AFFLVL2          U(2)
-#define MPIDR_AFFLVL(_n)       MPIDR_AFFLVL##_n
-
-#define MPIDR_AFFLVL0_VAL(mpidr) \
-               (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
-#define MPIDR_AFFLVL1_VAL(mpidr) \
-               (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
-#define MPIDR_AFFLVL2_VAL(mpidr) \
-               (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
-#define MPIDR_AFFLVL3_VAL(mpidr)       U(0)
-
-#define MPIDR_AFF_ID(mpid, n)                                  \
-       (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
-
-#define MPID_MASK              (MPIDR_MT_MASK                          |\
-                                (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT)|\
-                                (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT)|\
-                                (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
-
-/*
- * An invalid MPID. This value can be used by functions that return an MPID to
- * indicate an error.
- */
-#define INVALID_MPID           U(0xFFFFFFFF)
-
-/*
- * The MPIDR_MAX_AFFLVL count starts from 0. Take care to
- * add one when using this macro to define array sizes.
- */
-#define MPIDR_MAX_AFFLVL       U(2)
-
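
As a quick illustration of how the accessors above slice an MPIDR value
(a sketch: the macros are assumed to be in scope via this header and its
utils_def.h dependency, and the sample value is made up):

#include <assert.h>
#include <stdint.h>

static void mpidr_example(void)
{
	uint32_t mpidr = 0x80000102U;	/* hypothetical: Aff1 = 1, Aff0 = 2 */

	assert(MPIDR_AFF_ID(mpidr, 0) == 2U);	/* core within cluster */
	assert(MPIDR_AFF_ID(mpidr, 1) == 1U);	/* cluster */
	assert(MPIDR_AFFLVL2_VAL(mpidr) == 0U);
}
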
-/* Data Cache set/way op type defines */
-#define DC_OP_ISW                      U(0x0)
-#define DC_OP_CISW                     U(0x1)
-#define DC_OP_CSW                      U(0x2)
-
-/*******************************************************************************
- * Generic timer memory mapped registers & offsets
- ******************************************************************************/
-#define CNTCR_OFF                      U(0x000)
-#define CNTFID_OFF                     U(0x020)
-
-#define CNTCR_EN                       (U(1) << 0)
-#define CNTCR_HDBG                     (U(1) << 1)
-#define CNTCR_FCREQ(x)                 ((x) << 8)
-
-/*******************************************************************************
- * System register bit definitions
- ******************************************************************************/
-/* CLIDR definitions */
-#define LOUIS_SHIFT            U(21)
-#define LOC_SHIFT              U(24)
-#define CLIDR_FIELD_WIDTH      U(3)
-
-/* CSSELR definitions */
-#define LEVEL_SHIFT            U(1)
-
-/* ID_PFR0 AMU definitions */
-#define ID_PFR0_AMU_SHIFT      U(20)
-#define ID_PFR0_AMU_LENGTH     U(4)
-#define ID_PFR0_AMU_MASK       U(0xf)
-
-/* ID_PFR0 DIT definitions */
-#define ID_PFR0_DIT_SHIFT      U(24)
-#define ID_PFR0_DIT_LENGTH     U(4)
-#define ID_PFR0_DIT_MASK       U(0xf)
-#define ID_PFR0_DIT_SUPPORTED  (U(1) << ID_PFR0_DIT_SHIFT)
-
-/* ID_PFR1 definitions */
-#define ID_PFR1_VIRTEXT_SHIFT  U(12)
-#define ID_PFR1_VIRTEXT_MASK   U(0xf)
-#define GET_VIRT_EXT(id)       (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
-                                & ID_PFR1_VIRTEXT_MASK)
-#define ID_PFR1_GIC_SHIFT      U(28)
-#define ID_PFR1_GIC_MASK       U(0xf)
-
-/* SCTLR definitions */
-#define SCTLR_RES1_DEF         ((U(1) << 23) | (U(1) << 22) | (U(1) << 4) | \
-                                (U(1) << 3))
-#if ARM_ARCH_MAJOR == 7
-#define SCTLR_RES1             SCTLR_RES1_DEF
-#else
-#define SCTLR_RES1             (SCTLR_RES1_DEF | (U(1) << 11))
-#endif
-#define SCTLR_M_BIT            (U(1) << 0)
-#define SCTLR_A_BIT            (U(1) << 1)
-#define SCTLR_C_BIT            (U(1) << 2)
-#define SCTLR_CP15BEN_BIT      (U(1) << 5)
-#define SCTLR_ITD_BIT          (U(1) << 7)
-#define SCTLR_Z_BIT            (U(1) << 11)
-#define SCTLR_I_BIT            (U(1) << 12)
-#define SCTLR_V_BIT            (U(1) << 13)
-#define SCTLR_RR_BIT           (U(1) << 14)
-#define SCTLR_NTWI_BIT         (U(1) << 16)
-#define SCTLR_NTWE_BIT         (U(1) << 18)
-#define SCTLR_WXN_BIT          (U(1) << 19)
-#define SCTLR_UWXN_BIT         (U(1) << 20)
-#define SCTLR_EE_BIT           (U(1) << 25)
-#define SCTLR_TRE_BIT          (U(1) << 28)
-#define SCTLR_AFE_BIT          (U(1) << 29)
-#define SCTLR_TE_BIT           (U(1) << 30)
-#define SCTLR_DSSBS_BIT                (U(1) << 31)
-#define SCTLR_RESET_VAL         (SCTLR_RES1 | SCTLR_NTWE_BIT |         \
-                               SCTLR_NTWI_BIT | SCTLR_CP15BEN_BIT)
-
-/* SDCR definitions */
-#define SDCR_SPD(x)            ((x) << 14)
-#define SDCR_SPD_LEGACY                U(0x0)
-#define SDCR_SPD_DISABLE       U(0x2)
-#define SDCR_SPD_ENABLE                U(0x3)
-#define SDCR_RESET_VAL         U(0x0)
-
-/* HSCTLR definitions */
-#define HSCTLR_RES1    ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
-                        (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
-                        (U(1) << 11) | (U(1) << 4) | (U(1) << 3))
-
-#define HSCTLR_M_BIT           (U(1) << 0)
-#define HSCTLR_A_BIT           (U(1) << 1)
-#define HSCTLR_C_BIT           (U(1) << 2)
-#define HSCTLR_CP15BEN_BIT     (U(1) << 5)
-#define HSCTLR_ITD_BIT         (U(1) << 7)
-#define HSCTLR_SED_BIT         (U(1) << 8)
-#define HSCTLR_I_BIT           (U(1) << 12)
-#define HSCTLR_WXN_BIT         (U(1) << 19)
-#define HSCTLR_EE_BIT          (U(1) << 25)
-#define HSCTLR_TE_BIT          (U(1) << 30)
-
-/* CPACR definitions */
-#define CPACR_FPEN(x)          ((x) << 20)
-#define CPACR_FP_TRAP_PL0      U(0x1)
-#define CPACR_FP_TRAP_ALL      U(0x2)
-#define CPACR_FP_TRAP_NONE     U(0x3)
-
-/* SCR definitions */
-#define SCR_TWE_BIT            (U(1) << 13)
-#define SCR_TWI_BIT            (U(1) << 12)
-#define SCR_SIF_BIT            (U(1) << 9)
-#define SCR_HCE_BIT            (U(1) << 8)
-#define SCR_SCD_BIT            (U(1) << 7)
-#define SCR_NET_BIT            (U(1) << 6)
-#define SCR_AW_BIT             (U(1) << 5)
-#define SCR_FW_BIT             (U(1) << 4)
-#define SCR_EA_BIT             (U(1) << 3)
-#define SCR_FIQ_BIT            (U(1) << 2)
-#define SCR_IRQ_BIT            (U(1) << 1)
-#define SCR_NS_BIT             (U(1) << 0)
-#define SCR_VALID_BIT_MASK     U(0x33ff)
-#define SCR_RESET_VAL          U(0x0)
-
-#define GET_NS_BIT(scr)                ((scr) & SCR_NS_BIT)
-
-/* HCR definitions */
-#define HCR_TGE_BIT            (U(1) << 27)
-#define HCR_AMO_BIT            (U(1) << 5)
-#define HCR_IMO_BIT            (U(1) << 4)
-#define HCR_FMO_BIT            (U(1) << 3)
-#define HCR_RESET_VAL          U(0x0)
-
-/* CNTHCTL definitions */
-#define CNTHCTL_RESET_VAL      U(0x0)
-#define PL1PCEN_BIT            (U(1) << 1)
-#define PL1PCTEN_BIT           (U(1) << 0)
-
-/* CNTKCTL definitions */
-#define PL0PTEN_BIT            (U(1) << 9)
-#define PL0VTEN_BIT            (U(1) << 8)
-#define PL0PCTEN_BIT           (U(1) << 0)
-#define PL0VCTEN_BIT           (U(1) << 1)
-#define EVNTEN_BIT             (U(1) << 2)
-#define EVNTDIR_BIT            (U(1) << 3)
-#define EVNTI_SHIFT            U(4)
-#define EVNTI_MASK             U(0xf)
-
-/* HCPTR definitions */
-#define HCPTR_RES1             ((U(1) << 13) | (U(1) << 12) | U(0x3ff))
-#define TCPAC_BIT              (U(1) << 31)
-#define TAM_BIT                        (U(1) << 30)
-#define TTA_BIT                        (U(1) << 20)
-#define TCP11_BIT              (U(1) << 11)
-#define TCP10_BIT              (U(1) << 10)
-#define HCPTR_RESET_VAL                HCPTR_RES1
-
-/* VTTBR definitions */
-#define VTTBR_RESET_VAL                ULL(0x0)
-#define VTTBR_VMID_MASK                ULL(0xff)
-#define VTTBR_VMID_SHIFT       U(48)
-#define VTTBR_BADDR_MASK       ULL(0xffffffffffff)
-#define VTTBR_BADDR_SHIFT      U(0)
-
-/* HDCR definitions */
-#define HDCR_RESET_VAL         U(0x0)
-
-/* HSTR definitions */
-#define HSTR_RESET_VAL         U(0x0)
-
-/* CNTHP_CTL definitions */
-#define CNTHP_CTL_RESET_VAL    U(0x0)
-
-/* NSACR definitions */
-#define NSASEDIS_BIT           (U(1) << 15)
-#define NSTRCDIS_BIT           (U(1) << 20)
-#define NSACR_CP11_BIT         (U(1) << 11)
-#define NSACR_CP10_BIT         (U(1) << 10)
-#define NSACR_IMP_DEF_MASK     (U(0x7) << 16)
-#define NSACR_ENABLE_FP_ACCESS (NSACR_CP11_BIT | NSACR_CP10_BIT)
-#define NSACR_RESET_VAL                U(0x0)
-
-/* CPACR definitions */
-#define ASEDIS_BIT             (U(1) << 31)
-#define TRCDIS_BIT             (U(1) << 28)
-#define CPACR_CP11_SHIFT       U(22)
-#define CPACR_CP10_SHIFT       U(20)
-#define CPACR_ENABLE_FP_ACCESS ((U(0x3) << CPACR_CP11_SHIFT) |\
-                                (U(0x3) << CPACR_CP10_SHIFT))
-#define CPACR_RESET_VAL         U(0x0)
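A usage sketch of the two access-control register definitions above. The accessor functions are generated in arch_helpers.h; the nsacr pair is defined there, while the cpacr pair is assumed here for illustration.

static inline void enable_fp_access_sketch(void)
{
	/* Sketch only: grant CP10/CP11 (FP/NEON) access to both worlds.
	 * read_cpacr()/write_cpacr() are assumed accessors. */
	write_nsacr(read_nsacr() | NSACR_ENABLE_FP_ACCESS);
	write_cpacr(read_cpacr() | CPACR_ENABLE_FP_ACCESS);
	isb();
}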
-
-/* FPEXC definitions */
-#define FPEXC_RES1             ((U(1) << 10) | (U(1) << 9) | (U(1) << 8))
-#define FPEXC_EN_BIT           (U(1) << 30)
-#define FPEXC_RESET_VAL                FPEXC_RES1
-
-/* SPSR/CPSR definitions */
-#define SPSR_FIQ_BIT           (U(1) << 0)
-#define SPSR_IRQ_BIT           (U(1) << 1)
-#define SPSR_ABT_BIT           (U(1) << 2)
-#define SPSR_AIF_SHIFT         U(6)
-#define SPSR_AIF_MASK          U(0x7)
-
-#define SPSR_E_SHIFT           U(9)
-#define SPSR_E_MASK            U(0x1)
-#define SPSR_E_LITTLE          U(0)
-#define SPSR_E_BIG             U(1)
-
-#define SPSR_T_SHIFT           U(5)
-#define SPSR_T_MASK            U(0x1)
-#define SPSR_T_ARM             U(0)
-#define SPSR_T_THUMB           U(1)
-
-#define SPSR_MODE_SHIFT                U(0)
-#define SPSR_MODE_MASK         U(0x7)
-
-#define DISABLE_ALL_EXCEPTIONS \
-               (SPSR_FIQ_BIT | SPSR_IRQ_BIT | SPSR_ABT_BIT)
-
-#define CPSR_DIT_BIT           (U(1) << 21)
-
-/*
- * TTBCR definitions
- */
-#define TTBCR_EAE_BIT          (U(1) << 31)
-
-#define TTBCR_SH1_NON_SHAREABLE                (U(0x0) << 28)
-#define TTBCR_SH1_OUTER_SHAREABLE      (U(0x2) << 28)
-#define TTBCR_SH1_INNER_SHAREABLE      (U(0x3) << 28)
-
-#define TTBCR_RGN1_OUTER_NC    (U(0x0) << 26)
-#define TTBCR_RGN1_OUTER_WBA   (U(0x1) << 26)
-#define TTBCR_RGN1_OUTER_WT    (U(0x2) << 26)
-#define TTBCR_RGN1_OUTER_WBNA  (U(0x3) << 26)
-
-#define TTBCR_RGN1_INNER_NC    (U(0x0) << 24)
-#define TTBCR_RGN1_INNER_WBA   (U(0x1) << 24)
-#define TTBCR_RGN1_INNER_WT    (U(0x2) << 24)
-#define TTBCR_RGN1_INNER_WBNA  (U(0x3) << 24)
-
-#define TTBCR_EPD1_BIT         (U(1) << 23)
-#define TTBCR_A1_BIT           (U(1) << 22)
-
-#define TTBCR_T1SZ_SHIFT       U(16)
-#define TTBCR_T1SZ_MASK                U(0x7)
-#define TTBCR_TxSZ_MIN         U(0)
-#define TTBCR_TxSZ_MAX         U(7)
-
-#define TTBCR_SH0_NON_SHAREABLE                (U(0x0) << 12)
-#define TTBCR_SH0_OUTER_SHAREABLE      (U(0x2) << 12)
-#define TTBCR_SH0_INNER_SHAREABLE      (U(0x3) << 12)
-
-#define TTBCR_RGN0_OUTER_NC    (U(0x0) << 10)
-#define TTBCR_RGN0_OUTER_WBA   (U(0x1) << 10)
-#define TTBCR_RGN0_OUTER_WT    (U(0x2) << 10)
-#define TTBCR_RGN0_OUTER_WBNA  (U(0x3) << 10)
-
-#define TTBCR_RGN0_INNER_NC    (U(0x0) << 8)
-#define TTBCR_RGN0_INNER_WBA   (U(0x1) << 8)
-#define TTBCR_RGN0_INNER_WT    (U(0x2) << 8)
-#define TTBCR_RGN0_INNER_WBNA  (U(0x3) << 8)
-
-#define TTBCR_EPD0_BIT         (U(1) << 7)
-#define TTBCR_T0SZ_SHIFT       U(0)
-#define TTBCR_T0SZ_MASK                U(0x7)
-
-/*
- * HTCR definitions
- */
-#define HTCR_RES1                      ((U(1) << 31) | (U(1) << 23))
-
-#define HTCR_SH0_NON_SHAREABLE         (U(0x0) << 12)
-#define HTCR_SH0_OUTER_SHAREABLE       (U(0x2) << 12)
-#define HTCR_SH0_INNER_SHAREABLE       (U(0x3) << 12)
-
-#define HTCR_RGN0_OUTER_NC     (U(0x0) << 10)
-#define HTCR_RGN0_OUTER_WBA    (U(0x1) << 10)
-#define HTCR_RGN0_OUTER_WT     (U(0x2) << 10)
-#define HTCR_RGN0_OUTER_WBNA   (U(0x3) << 10)
-
-#define HTCR_RGN0_INNER_NC     (U(0x0) << 8)
-#define HTCR_RGN0_INNER_WBA    (U(0x1) << 8)
-#define HTCR_RGN0_INNER_WT     (U(0x2) << 8)
-#define HTCR_RGN0_INNER_WBNA   (U(0x3) << 8)
-
-#define HTCR_T0SZ_SHIFT                U(0)
-#define HTCR_T0SZ_MASK         U(0x7)
-
-#define MODE_RW_SHIFT          U(0x4)
-#define MODE_RW_MASK           U(0x1)
-#define MODE_RW_32             U(0x1)
-
-#define MODE32_SHIFT           U(0)
-#define MODE32_MASK            U(0x1f)
-#define MODE32_usr             U(0x10)
-#define MODE32_fiq             U(0x11)
-#define MODE32_irq             U(0x12)
-#define MODE32_svc             U(0x13)
-#define MODE32_mon             U(0x16)
-#define MODE32_abt             U(0x17)
-#define MODE32_hyp             U(0x1a)
-#define MODE32_und             U(0x1b)
-#define MODE32_sys             U(0x1f)
-
-#define GET_M32(mode)          (((mode) >> MODE32_SHIFT) & MODE32_MASK)
-
-#define SPSR_MODE32(mode, isa, endian, aif)            \
-       (MODE_RW_32 << MODE_RW_SHIFT |                  \
-       ((mode) & MODE32_MASK) << MODE32_SHIFT |        \
-       ((isa) & SPSR_T_MASK) << SPSR_T_SHIFT |         \
-       ((endian) & SPSR_E_MASK) << SPSR_E_SHIFT |      \
-       ((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT)
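A worked example of SPSR_MODE32(): building an SPSR for Supervisor mode, A32 ISA, little-endian, with all asynchronous exceptions masked. The function name is illustrative and the usual type headers are assumed.

static inline u_register_t build_svc_spsr_sketch(void)
{
	/* 0x10 (RW=32) | 0x13 (svc) | (0x7 << 6) (AIF masked) == 0x1d3 */
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
}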
-
-/*
- * TTBR definitions
- */
-#define TTBR_CNP_BIT           ULL(0x1)
-
-/*
- * CTR definitions
- */
-#define CTR_CWG_SHIFT          U(24)
-#define CTR_CWG_MASK           U(0xf)
-#define CTR_ERG_SHIFT          U(20)
-#define CTR_ERG_MASK           U(0xf)
-#define CTR_DMINLINE_SHIFT     U(16)
-#define CTR_DMINLINE_WIDTH     U(4)
-#define CTR_DMINLINE_MASK      ((U(1) << 4) - U(1))
-#define CTR_L1IP_SHIFT         U(14)
-#define CTR_L1IP_MASK          U(0x3)
-#define CTR_IMINLINE_SHIFT     U(0)
-#define CTR_IMINLINE_MASK      U(0xf)
-
-#define MAX_CACHE_LINE_SIZE    U(0x800) /* 2KB */
-
-/* PMCR definitions */
-#define PMCR_N_SHIFT           U(11)
-#define PMCR_N_MASK            U(0x1f)
-#define PMCR_N_BITS            (PMCR_N_MASK << PMCR_N_SHIFT)
-#define PMCR_LC_BIT            (U(1) << 6)
-#define PMCR_DP_BIT            (U(1) << 5)
-
-/*******************************************************************************
- * Definitions of register offsets, fields and macros for CPU system
- * instructions.
- ******************************************************************************/
-
-#define TLBI_ADDR_SHIFT                U(0)
-#define TLBI_ADDR_MASK         U(0xFFFFF000)
-#define TLBI_ADDR(x)           (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
-
-/*******************************************************************************
- * Definitions of register offsets and fields in the CNTCTLBase Frame of the
- * system level implementation of the Generic Timer.
- ******************************************************************************/
-#define CNTCTLBASE_CNTFRQ      U(0x0)
-#define CNTNSAR                        U(0x4)
-#define CNTNSAR_NS_SHIFT(x)    (x)
-
-#define CNTACR_BASE(x)         (U(0x40) + ((x) << 2))
-#define CNTACR_RPCT_SHIFT      U(0x0)
-#define CNTACR_RVCT_SHIFT      U(0x1)
-#define CNTACR_RFRQ_SHIFT      U(0x2)
-#define CNTACR_RVOFF_SHIFT     U(0x3)
-#define CNTACR_RWVT_SHIFT      U(0x4)
-#define CNTACR_RWPT_SHIFT      U(0x5)
-
-/*******************************************************************************
- * Definitions of register offsets and fields in the CNTBaseN Frame of the
- * system level implementation of the Generic Timer.
- ******************************************************************************/
-/* Physical Count register. */
-#define CNTPCT_LO              U(0x0)
-/* Counter Frequency register. */
-#define CNTBASEN_CNTFRQ                U(0x10)
-/* Physical Timer CompareValue register. */
-#define CNTP_CVAL_LO           U(0x20)
-/* Physical Timer Control register. */
-#define CNTP_CTL               U(0x2c)
-
-/* Physical timer control register bit-field shifts and masks */
-#define CNTP_CTL_ENABLE_SHIFT   0
-#define CNTP_CTL_IMASK_SHIFT    1
-#define CNTP_CTL_ISTATUS_SHIFT  2
-
-#define CNTP_CTL_ENABLE_MASK    U(1)
-#define CNTP_CTL_IMASK_MASK     U(1)
-#define CNTP_CTL_ISTATUS_MASK   U(1)
-
-/* MAIR macros */
-#define MAIR0_ATTR_SET(attr, index)    ((attr) << ((index) << U(3)))
-#define MAIR1_ATTR_SET(attr, index)    ((attr) << (((index) - U(3)) << U(3)))
-
-/* System register defines. The format is: coproc, opc1, CRn, CRm, opc2 */
-#define SCR            p15, 0, c1, c1, 0
-#define SCTLR          p15, 0, c1, c0, 0
-#define ACTLR          p15, 0, c1, c0, 1
-#define SDCR           p15, 0, c1, c3, 1
-#define MPIDR          p15, 0, c0, c0, 5
-#define MIDR           p15, 0, c0, c0, 0
-#define HVBAR          p15, 4, c12, c0, 0
-#define VBAR           p15, 0, c12, c0, 0
-#define MVBAR          p15, 0, c12, c0, 1
-#define NSACR          p15, 0, c1, c1, 2
-#define CPACR          p15, 0, c1, c0, 2
-#define DCCIMVAC       p15, 0, c7, c14, 1
-#define DCCMVAC                p15, 0, c7, c10, 1
-#define DCIMVAC                p15, 0, c7, c6, 1
-#define DCCISW         p15, 0, c7, c14, 2
-#define DCCSW          p15, 0, c7, c10, 2
-#define DCISW          p15, 0, c7, c6, 2
-#define CTR            p15, 0, c0, c0, 1
-#define CNTFRQ         p15, 0, c14, c0, 0
-#define ID_PFR0                p15, 0, c0, c1, 0
-#define ID_PFR1                p15, 0, c0, c1, 1
-#define MAIR0          p15, 0, c10, c2, 0
-#define MAIR1          p15, 0, c10, c2, 1
-#define TTBCR          p15, 0, c2, c0, 2
-#define TTBR0          p15, 0, c2, c0, 0
-#define TTBR1          p15, 0, c2, c0, 1
-#define TLBIALL                p15, 0, c8, c7, 0
-#define TLBIALLH       p15, 4, c8, c7, 0
-#define TLBIALLIS      p15, 0, c8, c3, 0
-#define TLBIMVA                p15, 0, c8, c7, 1
-#define TLBIMVAA       p15, 0, c8, c7, 3
-#define TLBIMVAAIS     p15, 0, c8, c3, 3
-#define TLBIMVAHIS     p15, 4, c8, c3, 1
-#define BPIALLIS       p15, 0, c7, c1, 6
-#define BPIALL         p15, 0, c7, c5, 6
-#define ICIALLU                p15, 0, c7, c5, 0
-#define HSCTLR         p15, 4, c1, c0, 0
-#define HCR            p15, 4, c1, c1, 0
-#define HCPTR          p15, 4, c1, c1, 2
-#define HSTR           p15, 4, c1, c1, 3
-#define CNTHCTL                p15, 4, c14, c1, 0
-#define CNTKCTL                p15, 0, c14, c1, 0
-#define VPIDR          p15, 4, c0, c0, 0
-#define VMPIDR         p15, 4, c0, c0, 5
-#define ISR            p15, 0, c12, c1, 0
-#define CLIDR          p15, 1, c0, c0, 1
-#define CSSELR         p15, 2, c0, c0, 0
-#define CCSIDR         p15, 1, c0, c0, 0
-#define HTCR           p15, 4, c2, c0, 2
-#define HMAIR0         p15, 4, c10, c2, 0
-#define ATS1CPR                p15, 0, c7, c8, 0
-#define ATS1HR         p15, 4, c7, c8, 0
-#define DBGOSDLR       p14, 0, c1, c3, 4
-
-/* Debug register defines. The format is: coproc, opc1, CRn, CRm, opc2 */
-#define HDCR           p15, 4, c1, c1, 1
-#define PMCR           p15, 0, c9, c12, 0
-#define CNTHP_TVAL     p15, 4, c14, c2, 0
-#define CNTHP_CTL      p15, 4, c14, c2, 1
-
-/* AArch32 coproc registers for 32bit MMU descriptor support */
-#define PRRR           p15, 0, c10, c2, 0
-#define NMRR           p15, 0, c10, c2, 1
-#define DACR           p15, 0, c3, c0, 0
-
-/* GICv3 CPU Interface system register defines. The format is: coproc, opc1, CRn, CRm, opc2 */
-#define ICC_IAR1       p15, 0, c12, c12, 0
-#define ICC_IAR0       p15, 0, c12, c8, 0
-#define ICC_EOIR1      p15, 0, c12, c12, 1
-#define ICC_EOIR0      p15, 0, c12, c8, 1
-#define ICC_HPPIR1     p15, 0, c12, c12, 2
-#define ICC_HPPIR0     p15, 0, c12, c8, 2
-#define ICC_BPR1       p15, 0, c12, c12, 3
-#define ICC_BPR0       p15, 0, c12, c8, 3
-#define ICC_DIR                p15, 0, c12, c11, 1
-#define ICC_PMR                p15, 0, c4, c6, 0
-#define ICC_RPR                p15, 0, c12, c11, 3
-#define ICC_CTLR       p15, 0, c12, c12, 4
-#define ICC_MCTLR      p15, 6, c12, c12, 4
-#define ICC_SRE                p15, 0, c12, c12, 5
-#define ICC_HSRE       p15, 4, c12, c9, 5
-#define ICC_MSRE       p15, 6, c12, c12, 5
-#define ICC_IGRPEN0    p15, 0, c12, c12, 6
-#define ICC_IGRPEN1    p15, 0, c12, c12, 7
-#define ICC_MGRPEN1    p15, 6, c12, c12, 7
-
-/* 64 bit system register defines. The format is: coproc, opc1, CRm */
-#define TTBR0_64       p15, 0, c2
-#define TTBR1_64       p15, 1, c2
-#define CNTVOFF_64     p15, 4, c14
-#define VTTBR_64       p15, 6, c2
-#define CNTPCT_64      p15, 0, c14
-#define HTTBR_64       p15, 4, c2
-#define CNTHP_CVAL_64  p15, 6, c14
-#define PAR_64         p15, 0, c7
-
-/* 64 bit GICv3 CPU Interface system register defines. The format is: coproc, opc1, CRm */
-#define ICC_SGI1R_EL1_64       p15, 0, c12
-#define ICC_ASGI1R_EL1_64      p15, 1, c12
-#define ICC_SGI0R_EL1_64       p15, 2, c12
-
-/*******************************************************************************
- * Definitions of MAIR encodings for device and normal memory
- ******************************************************************************/
-/*
- * MAIR encodings for device memory attributes.
- */
-#define MAIR_DEV_nGnRnE                U(0x0)
-#define MAIR_DEV_nGnRE         U(0x4)
-#define MAIR_DEV_nGRE          U(0x8)
-#define MAIR_DEV_GRE           U(0xc)
-
-/*
- * MAIR encodings for normal memory attributes.
- *
- * Cache Policy
- *  WT:         Write Through
- *  WB:         Write Back
- *  NC:         Non-Cacheable
- *
- * Transient Hint
- *  NTR: Non-Transient
- *  TR:         Transient
- *
- * Allocation Policy
- *  RA:         Read Allocate
- *  WA:         Write Allocate
- *  RWA: Read and Write Allocate
- *  NA:         No Allocation
- */
-#define MAIR_NORM_WT_TR_WA     U(0x1)
-#define MAIR_NORM_WT_TR_RA     U(0x2)
-#define MAIR_NORM_WT_TR_RWA    U(0x3)
-#define MAIR_NORM_NC           U(0x4)
-#define MAIR_NORM_WB_TR_WA     U(0x5)
-#define MAIR_NORM_WB_TR_RA     U(0x6)
-#define MAIR_NORM_WB_TR_RWA    U(0x7)
-#define MAIR_NORM_WT_NTR_NA    U(0x8)
-#define MAIR_NORM_WT_NTR_WA    U(0x9)
-#define MAIR_NORM_WT_NTR_RA    U(0xa)
-#define MAIR_NORM_WT_NTR_RWA   U(0xb)
-#define MAIR_NORM_WB_NTR_NA    U(0xc)
-#define MAIR_NORM_WB_NTR_WA    U(0xd)
-#define MAIR_NORM_WB_NTR_RA    U(0xe)
-#define MAIR_NORM_WB_NTR_RWA   U(0xf)
-
-#define MAIR_NORM_OUTER_SHIFT  U(4)
-
-#define MAKE_MAIR_NORMAL_MEMORY(inner, outer)  \
-               ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
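For instance, inner and outer write-back, non-transient, read/write-allocate normal memory encodes as 0xf | (0xf << 4) == 0xff, which can then be placed in an attribute slot. A sketch; write_mair0() is generated in arch_helpers.h and the macro name is illustrative:

#define ATTR_IWBWA_OWBWA \
	MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)

static inline void set_mair_attr0_sketch(void)
{
	/* Program the 0xff encoding into attribute index 0. */
	write_mair0(MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA, U(0)));
}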
-
-/* PAR fields */
-#define PAR_F_SHIFT    U(0)
-#define PAR_F_MASK     ULL(0x1)
-#define PAR_ADDR_SHIFT U(12)
-#define PAR_ADDR_MASK  (BIT_64(40) - ULL(1)) /* 40-bits-wide page address */
-
-/*******************************************************************************
- * Definitions for system register interface to AMU for ARMv8.4 onwards
- ******************************************************************************/
-#define AMCR           p15, 0, c13, c2, 0
-#define AMCFGR         p15, 0, c13, c2, 1
-#define AMCGCR         p15, 0, c13, c2, 2
-#define AMUSERENR      p15, 0, c13, c2, 3
-#define AMCNTENCLR0    p15, 0, c13, c2, 4
-#define AMCNTENSET0    p15, 0, c13, c2, 5
-#define AMCNTENCLR1    p15, 0, c13, c3, 0
-#define AMCNTENSET1    p15, 0, c13, c3, 1
-
-/* Activity Monitor Group 0 Event Counter Registers */
-#define AMEVCNTR00     p15, 0, c0
-#define AMEVCNTR01     p15, 1, c0
-#define AMEVCNTR02     p15, 2, c0
-#define AMEVCNTR03     p15, 3, c0
-
-/* Activity Monitor Group 0 Event Type Registers */
-#define AMEVTYPER00    p15, 0, c13, c6, 0
-#define AMEVTYPER01    p15, 0, c13, c6, 1
-#define AMEVTYPER02    p15, 0, c13, c6, 2
-#define AMEVTYPER03    p15, 0, c13, c6, 3
-
-/* Activity Monitor Group 1 Event Counter Registers */
-#define AMEVCNTR10     p15, 0, c4
-#define AMEVCNTR11     p15, 1, c4
-#define AMEVCNTR12     p15, 2, c4
-#define AMEVCNTR13     p15, 3, c4
-#define AMEVCNTR14     p15, 4, c4
-#define AMEVCNTR15     p15, 5, c4
-#define AMEVCNTR16     p15, 6, c4
-#define AMEVCNTR17     p15, 7, c4
-#define AMEVCNTR18     p15, 0, c5
-#define AMEVCNTR19     p15, 1, c5
-#define AMEVCNTR1A     p15, 2, c5
-#define AMEVCNTR1B     p15, 3, c5
-#define AMEVCNTR1C     p15, 4, c5
-#define AMEVCNTR1D     p15, 5, c5
-#define AMEVCNTR1E     p15, 6, c5
-#define AMEVCNTR1F     p15, 7, c5
-
-/* Activity Monitor Group 1 Event Type Registers */
-#define AMEVTYPER10    p15, 0, c13, c14, 0
-#define AMEVTYPER11    p15, 0, c13, c14, 1
-#define AMEVTYPER12    p15, 0, c13, c14, 2
-#define AMEVTYPER13    p15, 0, c13, c14, 3
-#define AMEVTYPER14    p15, 0, c13, c14, 4
-#define AMEVTYPER15    p15, 0, c13, c14, 5
-#define AMEVTYPER16    p15, 0, c13, c14, 6
-#define AMEVTYPER17    p15, 0, c13, c14, 7
-#define AMEVTYPER18    p15, 0, c13, c15, 0
-#define AMEVTYPER19    p15, 0, c13, c15, 1
-#define AMEVTYPER1A    p15, 0, c13, c15, 2
-#define AMEVTYPER1B    p15, 0, c13, c15, 3
-#define AMEVTYPER1C    p15, 0, c13, c15, 4
-#define AMEVTYPER1D    p15, 0, c13, c15, 5
-#define AMEVTYPER1E    p15, 0, c13, c15, 6
-#define AMEVTYPER1F    p15, 0, c13, c15, 7
-
-#endif /* ARCH_H */
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
deleted file mode 100644 (file)
index a6fe14f..0000000
+++ /dev/null
@@ -1,449 +0,0 @@
-/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ARCH_HELPERS_H
-#define ARCH_HELPERS_H
-
-#include <arch.h>
-#include <cdefs.h>
-#include <stdint.h>
-#include <string.h>
-
-/**********************************************************************
- * Macros which create inline functions to read or write CPU system
- * registers
- *********************************************************************/
-
-#define _DEFINE_COPROCR_WRITE_FUNC(_name, coproc, opc1, CRn, CRm, opc2)        \
-static inline void write_## _name(u_register_t v)                      \
-{                                                                      \
-       __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
-}
-
-#define _DEFINE_COPROCR_READ_FUNC(_name, coproc, opc1, CRn, CRm, opc2) \
-static inline u_register_t read_ ## _name(void)                                \
-{                                                                      \
-       u_register_t v;                                                 \
-       __asm__ volatile ("mrc "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : "=r" (v));\
-       return v;                                                       \
-}
-
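To make the expansion concrete: with the encodings listed in arch.h (SCR is p15, 0, c1, c1, 0), the DEFINE_COPROCR_RW_FUNCS(scr, SCR) wrapper used later in this header generates roughly the following pair of accessors:

static inline u_register_t read_scr(void)
{
	u_register_t v;

	__asm__ volatile ("mrc p15,0,%0,c1,c1,0" : "=r" (v));
	return v;
}

static inline void write_scr(u_register_t v)
{
	__asm__ volatile ("mcr p15,0,%0,c1,c1,0" : : "r" (v));
}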
-/*
- * The undocumented %Q and %R extended asm operand modifiers are used to
- * implement the 64-bit `mrrc` and `mcrr` instructions below.
- */
-
-#define _DEFINE_COPROCR_WRITE_FUNC_64(_name, coproc, opc1, CRm)                \
-static inline void write64_## _name(uint64_t v)                                \
-{                                                                      \
-       __asm__ volatile ("mcrr "#coproc","#opc1", %Q0, %R0,"#CRm : : "r" (v));\
-}
-
-#define _DEFINE_COPROCR_READ_FUNC_64(_name, coproc, opc1, CRm)         \
-static inline uint64_t read64_## _name(void)                           \
-{                                                                      \
-       uint64_t v;                                                     \
-       __asm__ volatile ("mrrc "#coproc","#opc1", %Q0, %R0,"#CRm : "=r" (v));\
-       return v;                                                       \
-}
-
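For the 64-bit variants, DEFINE_COPROCR_READ_FUNC_64(cntpct, CNTPCT_64), with CNTPCT_64 encoded as p15, 0, c14, expands to approximately:

static inline uint64_t read64_cntpct(void)
{
	uint64_t v;

	/* %Q0 is the low 32 bits of v, %R0 the high 32 bits. */
	__asm__ volatile ("mrrc p15,0, %Q0, %R0,c14" : "=r" (v));
	return v;
}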
-#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)                     \
-static inline u_register_t read_ ## _name(void)                                \
-{                                                                      \
-       u_register_t v;                                                 \
-       __asm__ volatile ("mrs %0, " #_reg_name : "=r" (v));            \
-       return v;                                                       \
-}
-
-#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)                    \
-static inline void write_ ## _name(u_register_t v)                     \
-{                                                                      \
-       __asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v));        \
-}
-
-#define _DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _reg_name)              \
-static inline void write_ ## _name(const u_register_t v)               \
-{                                                                      \
-       __asm__ volatile ("msr " #_reg_name ", %0" : : "i" (v));        \
-}
-
-/* Define read function for coproc register */
-#define DEFINE_COPROCR_READ_FUNC(_name, ...)                           \
-       _DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__)
-
-/* Define write function for coproc register */
-#define DEFINE_COPROCR_WRITE_FUNC(_name, ...)                          \
-       _DEFINE_COPROCR_WRITE_FUNC(_name, __VA_ARGS__)
-
-/* Define read & write function for coproc register */
-#define DEFINE_COPROCR_RW_FUNCS(_name, ...)                            \
-       _DEFINE_COPROCR_READ_FUNC(_name, __VA_ARGS__)                   \
-       _DEFINE_COPROCR_WRITE_FUNC(_name, __VA_ARGS__)
-
-/* Define 64 bit read function for coproc register */
-#define DEFINE_COPROCR_READ_FUNC_64(_name, ...)                        \
-       _DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__)
-
-/* Define 64 bit write function for coproc register */
-#define DEFINE_COPROCR_WRITE_FUNC_64(_name, ...)                       \
-       _DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__)
-
-/* Define 64 bit read & write function for coproc register */
-#define DEFINE_COPROCR_RW_FUNCS_64(_name, ...)                                 \
-       _DEFINE_COPROCR_READ_FUNC_64(_name, __VA_ARGS__)                \
-       _DEFINE_COPROCR_WRITE_FUNC_64(_name, __VA_ARGS__)
-
-/* Define read & write function for system register */
-#define DEFINE_SYSREG_RW_FUNCS(_name)                                  \
-       _DEFINE_SYSREG_READ_FUNC(_name, _name)                          \
-       _DEFINE_SYSREG_WRITE_FUNC(_name, _name)
-
-/**********************************************************************
- * Macros to create inline functions for TLBI operations
- *********************************************************************/
-
-#define _DEFINE_TLBIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2)         \
-static inline void tlbi##_op(void)                                     \
-{                                                                      \
-       u_register_t v = 0;                                             \
-       __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
-}
-
-#define _DEFINE_BPIOP_FUNC(_op, coproc, opc1, CRn, CRm, opc2)          \
-static inline void bpi##_op(void)                                      \
-{                                                                      \
-       u_register_t v = 0;                                             \
-       __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
-}
-
-#define _DEFINE_TLBIOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2)   \
-static inline void tlbi##_op(u_register_t v)                           \
-{                                                                      \
-       __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
-}
-
-/* Define function for simple TLBI operation */
-#define DEFINE_TLBIOP_FUNC(_op, ...)                                   \
-       _DEFINE_TLBIOP_FUNC(_op, __VA_ARGS__)
-
-/* Define function for TLBI operation with register parameter */
-#define DEFINE_TLBIOP_PARAM_FUNC(_op, ...)                             \
-       _DEFINE_TLBIOP_PARAM_FUNC(_op, __VA_ARGS__)
-
-/* Define function for simple BPI operation */
-#define DEFINE_BPIOP_FUNC(_op, ...)                                    \
-       _DEFINE_BPIOP_FUNC(_op, __VA_ARGS__)
-
-/**********************************************************************
- * Macros to create inline functions for DC operations
- *********************************************************************/
-#define _DEFINE_DCOP_PARAM_FUNC(_op, coproc, opc1, CRn, CRm, opc2)     \
-static inline void dc##_op(u_register_t v)                             \
-{                                                                      \
-       __asm__ volatile ("mcr "#coproc","#opc1",%0,"#CRn","#CRm","#opc2 : : "r" (v));\
-}
-
-/* Define function for DC operation with register parameter */
-#define DEFINE_DCOP_PARAM_FUNC(_op, ...)                               \
-       _DEFINE_DCOP_PARAM_FUNC(_op, __VA_ARGS__)
-
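Concretely, DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA), with TLBIMVA encoded as p15, 0, c8, c7, 1, generates a tlbimva() function. An invalidate-and-synchronise sequence might then look like this sketch (the wrapper function name is illustrative):

static inline void tlbi_va_sketch(uintptr_t va)
{
	/* Invalidate the TLB entry for a VA, then synchronise. tlbimva(),
	 * dsbish() and isb() are all generated in this header. */
	tlbimva(TLBI_ADDR(va));
	dsbish();
	isb();
}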
-/**********************************************************************
- * Macros to create inline functions for system instructions
- *********************************************************************/
-/* Define function for simple system instruction */
-#define DEFINE_SYSOP_FUNC(_op)                                         \
-static inline void _op(void)                                           \
-{                                                                      \
-       __asm__ (#_op);                                                 \
-}
-
-/* Define function for system instruction with type specifier */
-#define DEFINE_SYSOP_TYPE_FUNC(_op, _type)                             \
-static inline void _op ## _type(void)                                  \
-{                                                                      \
-       __asm__ (#_op " " #_type);                                      \
-}
-
-/* Define function for system instruction with register parameter */
-#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type)                       \
-static inline void _op ## _type(u_register_t v)                                \
-{                                                                      \
-       __asm__ (#_op " " #_type ", %0" : : "r" (v));                   \
-}
-
-void flush_dcache_range(uintptr_t addr, size_t size);
-void clean_dcache_range(uintptr_t addr, size_t size);
-void inv_dcache_range(uintptr_t addr, size_t size);
-
-void dcsw_op_louis(u_register_t op_type);
-void dcsw_op_all(u_register_t op_type);
-
-void disable_mmu_secure(void);
-void disable_mmu_icache_secure(void);
-
-DEFINE_SYSOP_FUNC(wfi)
-DEFINE_SYSOP_FUNC(wfe)
-DEFINE_SYSOP_FUNC(sev)
-DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
-DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
-DEFINE_SYSOP_TYPE_FUNC(dmb, st)
-
-/* dmb ld is not valid for armv7/thumb machines */
-#if ARM_ARCH_MAJOR != 7
-DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
-#endif
-
-DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
-DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
-DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
-DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
-DEFINE_SYSOP_FUNC(isb)
-
-void __dead2 smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
-                uint32_t r4, uint32_t r5, uint32_t r6, uint32_t r7);
-
-DEFINE_SYSREG_RW_FUNCS(spsr)
-DEFINE_SYSREG_RW_FUNCS(cpsr)
-
-/*******************************************************************************
- * System register accessor prototypes
- ******************************************************************************/
-DEFINE_COPROCR_READ_FUNC(mpidr, MPIDR)
-DEFINE_COPROCR_READ_FUNC(midr, MIDR)
-DEFINE_COPROCR_READ_FUNC(id_pfr0, ID_PFR0)
-DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
-DEFINE_COPROCR_READ_FUNC(isr, ISR)
-DEFINE_COPROCR_READ_FUNC(clidr, CLIDR)
-DEFINE_COPROCR_READ_FUNC_64(cntpct, CNTPCT_64)
-
-DEFINE_COPROCR_RW_FUNCS(scr, SCR)
-DEFINE_COPROCR_RW_FUNCS(ctr, CTR)
-DEFINE_COPROCR_RW_FUNCS(sctlr, SCTLR)
-DEFINE_COPROCR_RW_FUNCS(actlr, ACTLR)
-DEFINE_COPROCR_RW_FUNCS(hsctlr, HSCTLR)
-DEFINE_COPROCR_RW_FUNCS(hcr, HCR)
-DEFINE_COPROCR_RW_FUNCS(hcptr, HCPTR)
-DEFINE_COPROCR_RW_FUNCS(cntfrq, CNTFRQ)
-DEFINE_COPROCR_RW_FUNCS(cnthctl, CNTHCTL)
-DEFINE_COPROCR_RW_FUNCS(mair0, MAIR0)
-DEFINE_COPROCR_RW_FUNCS(mair1, MAIR1)
-DEFINE_COPROCR_RW_FUNCS(hmair0, HMAIR0)
-DEFINE_COPROCR_RW_FUNCS(ttbcr, TTBCR)
-DEFINE_COPROCR_RW_FUNCS(htcr, HTCR)
-DEFINE_COPROCR_RW_FUNCS(ttbr0, TTBR0)
-DEFINE_COPROCR_RW_FUNCS_64(ttbr0, TTBR0_64)
-DEFINE_COPROCR_RW_FUNCS(ttbr1, TTBR1)
-DEFINE_COPROCR_RW_FUNCS_64(httbr, HTTBR_64)
-DEFINE_COPROCR_RW_FUNCS(vpidr, VPIDR)
-DEFINE_COPROCR_RW_FUNCS(vmpidr, VMPIDR)
-DEFINE_COPROCR_RW_FUNCS_64(vttbr, VTTBR_64)
-DEFINE_COPROCR_RW_FUNCS_64(ttbr1, TTBR1_64)
-DEFINE_COPROCR_RW_FUNCS_64(cntvoff, CNTVOFF_64)
-DEFINE_COPROCR_RW_FUNCS(csselr, CSSELR)
-DEFINE_COPROCR_RW_FUNCS(hstr, HSTR)
-DEFINE_COPROCR_RW_FUNCS(cnthp_ctl_el2, CNTHP_CTL)
-DEFINE_COPROCR_RW_FUNCS(cnthp_tval_el2, CNTHP_TVAL)
-DEFINE_COPROCR_RW_FUNCS_64(cnthp_cval_el2, CNTHP_CVAL_64)
-
-#define get_cntp_ctl_enable(x)  (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
-                                        CNTP_CTL_ENABLE_MASK)
-#define get_cntp_ctl_imask(x)   (((x) >> CNTP_CTL_IMASK_SHIFT) & \
-                                        CNTP_CTL_IMASK_MASK)
-#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
-                                        CNTP_CTL_ISTATUS_MASK)
-
-#define set_cntp_ctl_enable(x)  ((x) |= U(1) << CNTP_CTL_ENABLE_SHIFT)
-#define set_cntp_ctl_imask(x)   ((x) |= U(1) << CNTP_CTL_IMASK_SHIFT)
-
-#define clr_cntp_ctl_enable(x)  ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
-#define clr_cntp_ctl_imask(x)   ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
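A short usage sketch of these field helpers, combined with the cnthp_ctl_el2 accessors defined just above; the enable sequence itself is illustrative:

static inline void start_sec_phys_timer_sketch(void)
{
	u_register_t ctl = read_cnthp_ctl_el2();

	set_cntp_ctl_enable(ctl);       /* set the ENABLE field */
	clr_cntp_ctl_imask(ctl);        /* clear IMASK: leave the
					   interrupt unmasked */
	write_cnthp_ctl_el2(ctl);
}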
-
-DEFINE_COPROCR_RW_FUNCS(icc_sre_el1, ICC_SRE)
-DEFINE_COPROCR_RW_FUNCS(icc_sre_el2, ICC_HSRE)
-DEFINE_COPROCR_RW_FUNCS(icc_sre_el3, ICC_MSRE)
-DEFINE_COPROCR_RW_FUNCS(icc_pmr_el1, ICC_PMR)
-DEFINE_COPROCR_RW_FUNCS(icc_rpr_el1, ICC_RPR)
-DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el3, ICC_MGRPEN1)
-DEFINE_COPROCR_RW_FUNCS(icc_igrpen1_el1, ICC_IGRPEN1)
-DEFINE_COPROCR_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0)
-DEFINE_COPROCR_RW_FUNCS(icc_hppir0_el1, ICC_HPPIR0)
-DEFINE_COPROCR_RW_FUNCS(icc_hppir1_el1, ICC_HPPIR1)
-DEFINE_COPROCR_RW_FUNCS(icc_iar0_el1, ICC_IAR0)
-DEFINE_COPROCR_RW_FUNCS(icc_iar1_el1, ICC_IAR1)
-DEFINE_COPROCR_RW_FUNCS(icc_eoir0_el1, ICC_EOIR0)
-DEFINE_COPROCR_RW_FUNCS(icc_eoir1_el1, ICC_EOIR1)
-DEFINE_COPROCR_RW_FUNCS_64(icc_sgi0r_el1, ICC_SGI0R_EL1_64)
-DEFINE_COPROCR_WRITE_FUNC_64(icc_sgi1r, ICC_SGI1R_EL1_64)
-
-DEFINE_COPROCR_RW_FUNCS(hdcr, HDCR)
-DEFINE_COPROCR_RW_FUNCS(cnthp_ctl, CNTHP_CTL)
-DEFINE_COPROCR_READ_FUNC(pmcr, PMCR)
-
-/*
- * Address translation
- */
-DEFINE_COPROCR_WRITE_FUNC(ats1cpr, ATS1CPR)
-DEFINE_COPROCR_WRITE_FUNC(ats1hr, ATS1HR)
-DEFINE_COPROCR_RW_FUNCS_64(par, PAR_64)
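These accessors can be combined with the PAR field definitions from arch.h into a stage-1 VA-to-PA helper. A minimal sketch, assuming a 4KB granule and a secure PL1 read translation:

static inline uint64_t va_to_pa_sketch(uintptr_t va)
{
	uint64_t par;

	write_ats1cpr(va);      /* request the translation */
	isb();
	par = read64_par();
	if ((par & PAR_F_MASK) != 0ULL)
		return ~0ULL;   /* translation aborted */
	return ((((par >> PAR_ADDR_SHIFT) & PAR_ADDR_MASK)
		 << PAR_ADDR_SHIFT) | ((uint64_t)va & 0xfffULL));
}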
-
-DEFINE_COPROCR_RW_FUNCS(nsacr, NSACR)
-
-/* AArch32 coproc registers for 32bit MMU descriptor support */
-DEFINE_COPROCR_RW_FUNCS(prrr, PRRR)
-DEFINE_COPROCR_RW_FUNCS(nmrr, NMRR)
-DEFINE_COPROCR_RW_FUNCS(dacr, DACR)
-
-DEFINE_COPROCR_RW_FUNCS(amcntenset0, AMCNTENSET0)
-DEFINE_COPROCR_RW_FUNCS(amcntenset1, AMCNTENSET1)
-DEFINE_COPROCR_RW_FUNCS(amcntenclr0, AMCNTENCLR0)
-DEFINE_COPROCR_RW_FUNCS(amcntenclr1, AMCNTENCLR1)
-
-DEFINE_COPROCR_RW_FUNCS_64(amevcntr00, AMEVCNTR00)
-DEFINE_COPROCR_RW_FUNCS_64(amevcntr01, AMEVCNTR01)
-DEFINE_COPROCR_RW_FUNCS_64(amevcntr02, AMEVCNTR02)
-DEFINE_COPROCR_RW_FUNCS_64(amevcntr03, AMEVCNTR03)
-
-/*
- * TLBI operation prototypes
- */
-DEFINE_TLBIOP_FUNC(all, TLBIALL)
-DEFINE_TLBIOP_FUNC(allis, TLBIALLIS)
-DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
-DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
-DEFINE_TLBIOP_PARAM_FUNC(mvaais, TLBIMVAAIS)
-DEFINE_TLBIOP_PARAM_FUNC(mvahis, TLBIMVAHIS)
-
-/*
- * BPI operation prototypes.
- */
-DEFINE_BPIOP_FUNC(allis, BPIALLIS)
-
-/*
- * DC operation prototypes
- */
-DEFINE_DCOP_PARAM_FUNC(civac, DCCIMVAC)
-DEFINE_DCOP_PARAM_FUNC(ivac, DCIMVAC)
-DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC)
-
-/* Aliases for the barrier functions above whose names omit the type specifier */
-#define dsb()                  dsbsy()
-#define dmb()                  dmbsy()
-
-/* dmb ld is not valid for armv7/thumb machines, so alias it to dmb */
-#if ARM_ARCH_MAJOR == 7
-#define        dmbld()                 dmb()
-#endif
-
-#define IS_IN_SECURE() \
-       (GET_NS_BIT(read_scr()) == 0)
-
-#define IS_IN_HYP()    (GET_M32(read_cpsr()) == MODE32_hyp)
-#define IS_IN_SVC()    (GET_M32(read_cpsr()) == MODE32_svc)
-#define IS_IN_MON()    (GET_M32(read_cpsr()) == MODE32_mon)
-#define IS_IN_EL2()    IS_IN_HYP()
-/* If EL3 is AArch32, then secure PL1 and monitor mode correspond to EL3 */
-#define IS_IN_EL3() \
-       ((GET_M32(read_cpsr()) == MODE32_mon) ||        \
-               (IS_IN_SECURE() && (GET_M32(read_cpsr()) != MODE32_usr)))
-
-static inline unsigned int get_current_el(void)
-{
-       if (IS_IN_EL3()) {
-               return 3U;
-       } else if (IS_IN_EL2()) {
-               return 2U;
-       } else {
-               return 1U;
-       }
-}
-
-/* Macros for compatibility with AArch64 system registers */
-#define read_mpidr_el1()       read_mpidr()
-
-#define read_scr_el3()         read_scr()
-#define write_scr_el3(_v)      write_scr(_v)
-
-#define read_hcr_el2()         read_hcr()
-#define write_hcr_el2(_v)      write_hcr(_v)
-
-#define read_cpacr_el1()       read_cpacr()
-#define write_cpacr_el1(_v)    write_cpacr(_v)
-
-#define read_cntfrq_el0()      read_cntfrq()
-#define write_cntfrq_el0(_v)   write_cntfrq(_v)
-#define read_isr_el1()         read_isr()
-
-#define read_cntpct_el0()      read64_cntpct()
-
-#define read_ctr_el0()         read_ctr()
-
-#define write_icc_sgi0r_el1(_v)        write64_icc_sgi0r_el1(_v)
-
-#define read_daif()            read_cpsr()
-#define write_daif(flags)      write_cpsr(flags)
-
-#define read_cnthp_cval_el2()  read64_cnthp_cval_el2()
-#define write_cnthp_cval_el2(v)        write64_cnthp_cval_el2(v)
-
-#define read_amcntenset0_el0() read_amcntenset0()
-#define read_amcntenset1_el0() read_amcntenset1()
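These aliases let code shared with AArch64 compile unchanged on AArch32. A sketch; the function name and the level-0 extraction are illustrative:

static inline unsigned int core_pos_sketch(void)
{
	/* read_mpidr_el1() resolves to read_mpidr() on AArch32. */
	u_register_t mpidr = read_mpidr_el1();

	return (unsigned int)(mpidr & U(0xff)); /* affinity level 0 */
}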
-
-/* Helper functions to manipulate CPSR */
-static inline void enable_irq(void)
-{
-       /*
-        * The compiler memory barrier will prevent the compiler from
-        * scheduling non-volatile memory access after the write to the
-        * register.
-        *
-        * This could happen if some initialization code issues non-volatile
-        * accesses to an area used by an interrupt handler, on the assumption
-        * that this is safe because interrupts are disabled at that point
-        * (according to program order). However, non-volatile accesses are not
-        * necessarily kept in program order relative to volatile inline
-        * assembly statements (and volatile accesses).
-        */
-       COMPILER_BARRIER();
-       __asm__ volatile ("cpsie        i");
-       isb();
-}
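To illustrate the hazard described in the comment above (all names here are hypothetical): without the barrier, the compiler could legally sink the plain store below the inline asm that unmasks interrupts.

extern int shared_with_handler; /* hypothetical, non-volatile */

void init_then_unmask(int value)
{
	shared_with_handler = value;    /* only COMPILER_BARRIER() keeps
					   this store before "cpsie i" */
	enable_irq();
}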
-
-static inline void enable_serror(void)
-{
-       COMPILER_BARRIER();
-       __asm__ volatile ("cpsie        a");
-       isb();
-}
-
-static inline void enable_fiq(void)
-{
-       COMPILER_BARRIER();
-       __asm__ volatile ("cpsie        f");
-       isb();
-}
-
-static inline void disable_irq(void)
-{
-       COMPILER_BARRIER();
-       __asm__ volatile ("cpsid        i");
-       isb();
-}
-
-static inline void disable_serror(void)
-{
-       COMPILER_BARRIER();
-       __asm__ volatile ("cpsid        a");
-       isb();
-}
-
-static inline void disable_fiq(void)
-{
-       COMPILER_BARRIER();
-       __asm__ volatile ("cpsid        f");
-       isb();
-}
-
-#endif /* ARCH_HELPERS_H */
diff --git a/include/lib/aarch32/smccc_helpers.h b/include/lib/aarch32/smccc_helpers.h
deleted file mode 100644 (file)
index 67952ec..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SMCCC_HELPERS_H
-#define SMCCC_HELPERS_H
-
-#include <smccc.h>
-
-/* These are offsets to registers in smc_ctx_t */
-#define SMC_CTX_GPREG_R0       U(0x0)
-#define SMC_CTX_GPREG_R1       U(0x4)
-#define SMC_CTX_GPREG_R2       U(0x8)
-#define SMC_CTX_GPREG_R3       U(0xC)
-#define SMC_CTX_GPREG_R4       U(0x10)
-#define SMC_CTX_GPREG_R5       U(0x14)
-#define SMC_CTX_SP_USR         U(0x34)
-#define SMC_CTX_SPSR_MON       U(0x78)
-#define SMC_CTX_SP_MON         U(0x7C)
-#define SMC_CTX_LR_MON         U(0x80)
-#define SMC_CTX_SCR            U(0x84)
-#define SMC_CTX_PMCR           U(0x88)
-#define SMC_CTX_SIZE           U(0x90)
-
-#ifndef __ASSEMBLY__
-#include <cassert.h>
-#include <stdint.h>
-
-/*
- * The generic structure used to save arguments and callee-saved registers
- * during an SMC. It is also used to store the return values after completion
- * of the SMC service.
- */
-typedef struct smc_ctx {
-       u_register_t r0;
-       u_register_t r1;
-       u_register_t r2;
-       u_register_t r3;
-       u_register_t r4;
-       u_register_t r5;
-       u_register_t r6;
-       u_register_t r7;
-       u_register_t r8;
-       u_register_t r9;
-       u_register_t r10;
-       u_register_t r11;
-       u_register_t r12;
-       /* spsr_usr doesn't exist */
-       u_register_t sp_usr;
-       u_register_t lr_usr;
-       u_register_t spsr_irq;
-       u_register_t sp_irq;
-       u_register_t lr_irq;
-       u_register_t spsr_fiq;
-       u_register_t sp_fiq;
-       u_register_t lr_fiq;
-       u_register_t spsr_svc;
-       u_register_t sp_svc;
-       u_register_t lr_svc;
-       u_register_t spsr_abt;
-       u_register_t sp_abt;
-       u_register_t lr_abt;
-       u_register_t spsr_und;
-       u_register_t sp_und;
-       u_register_t lr_und;
-       u_register_t spsr_mon;
-       /*
-        * `sp_mon` points to the C runtime stack in monitor mode. However,
-        * prior to exiting from the SMC, it is set to point to this
-        * `smc_ctx_t`, so that the context can be accessed easily on the
-        * next SMC entry.
-        */
-       u_register_t sp_mon;
-       u_register_t lr_mon;
-       u_register_t scr;
-       u_register_t pmcr;
-       /*
-        * The workaround for CVE-2017-5715 requires storing information in
- * the bottom 3 bits of the stack pointer. Add a padding field to
-        * force the size of the struct to be a multiple of 8.
-        */
-       u_register_t pad;
-} smc_ctx_t __aligned(8);
-
-/*
- * Compile time assertions related to the 'smc_context' structure to
- * ensure that the assembler and the compiler views of the offsets of
- * the structure members are the same.
- */
-CASSERT(SMC_CTX_GPREG_R0 == __builtin_offsetof(smc_ctx_t, r0), \
-       assert_smc_ctx_greg_r0_offset_mismatch);
-CASSERT(SMC_CTX_GPREG_R1 == __builtin_offsetof(smc_ctx_t, r1), \
-       assert_smc_ctx_greg_r1_offset_mismatch);
-CASSERT(SMC_CTX_GPREG_R2 == __builtin_offsetof(smc_ctx_t, r2), \
-       assert_smc_ctx_greg_r2_offset_mismatch);
-CASSERT(SMC_CTX_GPREG_R3 == __builtin_offsetof(smc_ctx_t, r3), \
-       assert_smc_ctx_greg_r3_offset_mismatch);
-CASSERT(SMC_CTX_GPREG_R4 == __builtin_offsetof(smc_ctx_t, r4), \
-       assert_smc_ctx_greg_r4_offset_mismatch);
-CASSERT(SMC_CTX_SP_USR == __builtin_offsetof(smc_ctx_t, sp_usr), \
-       assert_smc_ctx_sp_usr_offset_mismatch);
-CASSERT(SMC_CTX_LR_MON == __builtin_offsetof(smc_ctx_t, lr_mon), \
-       assert_smc_ctx_lr_mon_offset_mismatch);
-CASSERT(SMC_CTX_SPSR_MON == __builtin_offsetof(smc_ctx_t, spsr_mon), \
-       assert_smc_ctx_spsr_mon_offset_mismatch);
-
-CASSERT((sizeof(smc_ctx_t) & 0x7U) == 0U, assert_smc_ctx_not_aligned);
-CASSERT(SMC_CTX_SIZE == sizeof(smc_ctx_t), assert_smc_ctx_size_mismatch);
-
-/* Convenience macros to return from an SMC handler */
-#define SMC_RET0(_h) {                         \
-       return (uintptr_t)(_h);                 \
-}
-#define SMC_RET1(_h, _r0) {                    \
-       ((smc_ctx_t *)(_h))->r0 = (_r0);        \
-       SMC_RET0(_h);                           \
-}
-#define SMC_RET2(_h, _r0, _r1) {               \
-       ((smc_ctx_t *)(_h))->r1 = (_r1);        \
-       SMC_RET1(_h, (_r0));                    \
-}
-#define SMC_RET3(_h, _r0, _r1, _r2) {          \
-       ((smc_ctx_t *)(_h))->r2 = (_r2);        \
-       SMC_RET2(_h, (_r0), (_r1));             \
-}
-#define SMC_RET4(_h, _r0, _r1, _r2, _r3) {     \
-       ((smc_ctx_t *)(_h))->r3 = (_r3);        \
-       SMC_RET3(_h, (_r0), (_r1), (_r2));      \
-}
-
-/*
- * Helper macro to retrieve the SMC parameters from smc_ctx_t.
- */
-#define get_smc_params_from_ctx(_hdl, _r1, _r2, _r3, _r4) {    \
-               _r1 = ((smc_ctx_t *)_hdl)->r1;          \
-               _r2 = ((smc_ctx_t *)_hdl)->r2;          \
-               _r3 = ((smc_ctx_t *)_hdl)->r3;          \
-               _r4 = ((smc_ctx_t *)_hdl)->r4;          \
-               }
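Putting the two together, a minimal hypothetical handler that echoes its first argument back to the caller might read:

static uintptr_t echo_smc_handler(void *handle)
{
	u_register_t r1, r2, r3, r4;

	get_smc_params_from_ctx(handle, r1, r2, r3, r4);
	(void)r2; (void)r3; (void)r4;   /* unused in this sketch */
	SMC_RET1(handle, r1);           /* stores r0, returns the context */
}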
-
-/* ------------------------------------------------------------------------
- * Helper APIs for setting and retrieving appropriate `smc_ctx_t`.
- * These functions need to be implemented by the BL image including this
- * library.
- * ------------------------------------------------------------------------
- */
-
-/* Get the pointer to `smc_ctx_t` corresponding to the security state. */
-void *smc_get_ctx(unsigned int security_state);
-
-/* Set the next `smc_ctx_t` corresponding to the security state. */
-void smc_set_next_ctx(unsigned int security_state);
-
-/* Get the pointer to next `smc_ctx_t` already set by `smc_set_next_ctx()`. */
-void *smc_get_next_ctx(void);
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* SMCCC_HELPERS_H */
diff --git a/include/lib/aarch32/smccc_macros.S b/include/lib/aarch32/smccc_macros.S
deleted file mode 100644 (file)
index 1fe6c64..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef SMCCC_MACROS_S
-#define SMCCC_MACROS_S
-
-#include <arch.h>
-
-/*
- * Macro to save the general purpose registers (r0 - r12), the banked
- * spsr, lr and sp registers, and the `scr` register to the SMC context on
- * entry due to an SMC call. The `lr` of the current mode (monitor) is
- * expected to have been saved already. The `sp` must point to the
- * `smc_ctx_t` to save to. Also save the `pmcr` register, as it is updated
- * whilst executing in the secure world.
- */
-       .macro smccc_save_gp_mode_regs
-       /* Save r0 - r12 in the SMC context */
-       stm     sp, {r0-r12}
-       mov     r0, sp
-       add     r0, r0, #SMC_CTX_SP_USR
-
-#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
-       /* Must be in secure state to restore Monitor mode */
-       ldcopr  r4, SCR
-       bic     r2, r4, #SCR_NS_BIT
-       stcopr  r2, SCR
-       isb
-
-       cps     #MODE32_sys
-       stm     r0!, {sp, lr}
-
-       cps     #MODE32_irq
-       mrs     r2, spsr
-       stm     r0!, {r2, sp, lr}
-
-       cps     #MODE32_fiq
-       mrs     r2, spsr
-       stm     r0!, {r2, sp, lr}
-
-       cps     #MODE32_svc
-       mrs     r2, spsr
-       stm     r0!, {r2, sp, lr}
-
-       cps     #MODE32_abt
-       mrs     r2, spsr
-       stm     r0!, {r2, sp, lr}
-
-       cps     #MODE32_und
-       mrs     r2, spsr
-       stm     r0!, {r2, sp, lr}
-
-       /* lr_mon is already saved by caller */
-       cps     #MODE32_mon
-       mrs     r2, spsr
-       stm     r0!, {r2}
-
-       stcopr  r4, SCR
-       isb
-#else
-       /* Save the banked registers including the current SPSR and LR */
-       mrs     r4, sp_usr
-       mrs     r5, lr_usr
-       mrs     r6, spsr_irq
-       mrs     r7, sp_irq
-       mrs     r8, lr_irq
-       mrs     r9, spsr_fiq
-       mrs     r10, sp_fiq
-       mrs     r11, lr_fiq
-       mrs     r12, spsr_svc
-       stm     r0!, {r4-r12}
-
-       mrs     r4, sp_svc
-       mrs     r5, lr_svc
-       mrs     r6, spsr_abt
-       mrs     r7, sp_abt
-       mrs     r8, lr_abt
-       mrs     r9, spsr_und
-       mrs     r10, sp_und
-       mrs     r11, lr_und
-       mrs     r12, spsr
-       stm     r0!, {r4-r12}
-       /* lr_mon is already saved by caller */
-
-       ldcopr  r4, SCR
-#endif
-       str     r4, [sp, #SMC_CTX_SCR]
-       ldcopr  r4, PMCR
-       str     r4, [sp, #SMC_CTX_PMCR]
-       .endm
-
-/*
- * Macro to restore the `smc_ctx_t`, which includes the general purpose
- * registers and banked mode registers, and exit from monitor mode.
- * r0 must point to the `smc_ctx_t` to restore from.
- */
-       .macro monitor_exit
-       /*
-        * Save the current sp and load the smc context
-        * pointer into sp, where it will be used for handling
-        * the next SMC.
-        */
-       str     sp, [r0, #SMC_CTX_SP_MON]
-       mov     sp, r0
-
-       /*
-        * Restore SCR first so that we access the right banked register
-        * when the other mode registers are restored.
-        */
-       ldr     r1, [r0, #SMC_CTX_SCR]
-       stcopr  r1, SCR
-       isb
-
-       /*
-        * Restore the PMCR register.
-        */
-       ldr     r1, [r0, #SMC_CTX_PMCR]
-       stcopr  r1, PMCR
-
-       /* Restore the banked registers including the current SPSR */
-       add     r1, r0, #SMC_CTX_SP_USR
-
-#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
-       /* Must be in secure state to restore Monitor mode */
-       ldcopr  r4, SCR
-       bic     r2, r4, #SCR_NS_BIT
-       stcopr  r2, SCR
-       isb
-
-       cps     #MODE32_sys
-       ldm     r1!, {sp, lr}
-
-       cps     #MODE32_irq
-       ldm     r1!, {r2, sp, lr}
-       msr     spsr_fsxc, r2
-
-       cps     #MODE32_fiq
-       ldm     r1!, {r2, sp, lr}
-       msr     spsr_fsxc, r2
-
-       cps     #MODE32_svc
-       ldm     r1!, {r2, sp, lr}
-       msr     spsr_fsxc, r2
-
-       cps     #MODE32_abt
-       ldm     r1!, {r2, sp, lr}
-       msr     spsr_fsxc, r2
-
-       cps     #MODE32_und
-       ldm     r1!, {r2, sp, lr}
-       msr     spsr_fsxc, r2
-
-       cps     #MODE32_mon
-       ldm     r1!, {r2}
-       msr     spsr_fsxc, r2
-
-       stcopr  r4, SCR
-       isb
-#else
-       ldm     r1!, {r4-r12}
-       msr     sp_usr, r4
-       msr     lr_usr, r5
-       msr     spsr_irq, r6
-       msr     sp_irq, r7
-       msr     lr_irq, r8
-       msr     spsr_fiq, r9
-       msr     sp_fiq, r10
-       msr     lr_fiq, r11
-       msr     spsr_svc, r12
-
-       ldm     r1!, {r4-r12}
-       msr     sp_svc, r4
-       msr     lr_svc, r5
-       msr     spsr_abt, r6
-       msr     sp_abt, r7
-       msr     lr_abt, r8
-       msr     spsr_und, r9
-       msr     sp_und, r10
-       msr     lr_und, r11
-       /*
-        * Use the `_fsxc` suffix explicitly to instruct the assembler to
-        * update all 32 bits of the SPSR. Otherwise the assembler assumes
-        * the `_fc` suffix by default, which only modifies the f ([31:24])
-        * and c ([7:0]) fields of the SPSR.
-        */
-       msr     spsr_fsxc, r12
-#endif
-
-       /* Restore the LR */
-       ldr     lr, [r0, #SMC_CTX_LR_MON]
-
-       /* Restore the rest of the general purpose registers */
-       ldm     r0, {r0-r12}
-       eret
-       .endm
-
-#endif /* SMCCC_MACROS_S */
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
deleted file mode 100644 (file)
index 72a14dc..0000000
+++ /dev/null
@@ -1,823 +0,0 @@
-/*
- * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ARCH_H
-#define ARCH_H
-
-#include <utils_def.h>
-
-/*******************************************************************************
- * MIDR bit definitions
- ******************************************************************************/
-#define MIDR_IMPL_MASK         U(0xff)
-#define MIDR_IMPL_SHIFT                U(0x18)
-#define MIDR_VAR_SHIFT         U(20)
-#define MIDR_VAR_BITS          U(4)
-#define MIDR_VAR_MASK          U(0xf)
-#define MIDR_REV_SHIFT         U(0)
-#define MIDR_REV_BITS          U(4)
-#define MIDR_REV_MASK          U(0xf)
-#define MIDR_PN_MASK           U(0xfff)
-#define MIDR_PN_SHIFT          U(0x4)
-
-/*******************************************************************************
- * MPIDR macros
- ******************************************************************************/
-#define MPIDR_MT_MASK          (ULL(1) << 24)
-#define MPIDR_CPU_MASK         MPIDR_AFFLVL_MASK
-#define MPIDR_CLUSTER_MASK     (MPIDR_AFFLVL_MASK << MPIDR_AFFINITY_BITS)
-#define MPIDR_AFFINITY_BITS    U(8)
-#define MPIDR_AFFLVL_MASK      ULL(0xff)
-#define MPIDR_AFF0_SHIFT       U(0)
-#define MPIDR_AFF1_SHIFT       U(8)
-#define MPIDR_AFF2_SHIFT       U(16)
-#define MPIDR_AFF3_SHIFT       U(32)
-#define MPIDR_AFF_SHIFT(_n)    MPIDR_AFF##_n##_SHIFT
-#define MPIDR_AFFINITY_MASK    ULL(0xff00ffffff)
-#define MPIDR_AFFLVL_SHIFT     U(3)
-#define MPIDR_AFFLVL0          ULL(0x0)
-#define MPIDR_AFFLVL1          ULL(0x1)
-#define MPIDR_AFFLVL2          ULL(0x2)
-#define MPIDR_AFFLVL3          ULL(0x3)
-#define MPIDR_AFFLVL(_n)       MPIDR_AFFLVL##_n
-#define MPIDR_AFFLVL0_VAL(mpidr) \
-               (((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
-#define MPIDR_AFFLVL1_VAL(mpidr) \
-               (((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)
-#define MPIDR_AFFLVL2_VAL(mpidr) \
-               (((mpidr) >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK)
-#define MPIDR_AFFLVL3_VAL(mpidr) \
-               (((mpidr) >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK)
-/*
- * The MPIDR_MAX_AFFLVL count starts from 0. Take care to add one when
- * using this macro to define array sizes.
- * TODO: Only the first three affinity levels are supported for now.
- */
-#define MPIDR_MAX_AFFLVL       U(2)
-
-#define MPID_MASK              (MPIDR_MT_MASK                           | \
-                                (MPIDR_AFFLVL_MASK << MPIDR_AFF3_SHIFT) | \
-                                (MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT) | \
-                                (MPIDR_AFFLVL_MASK << MPIDR_AFF1_SHIFT) | \
-                                (MPIDR_AFFLVL_MASK << MPIDR_AFF0_SHIFT))
-
-#define MPIDR_AFF_ID(mpid, n)                                  \
-       (((mpid) >> MPIDR_AFF_SHIFT(n)) & MPIDR_AFFLVL_MASK)
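A worked example with the illustrative MPIDR value 0x81000102 (MT set, cluster 1, core 2); the function name is hypothetical:

static inline unsigned int mpidr_fields_sketch(void)
{
	u_register_t mpidr = 0x81000102U;       /* illustrative value */

	/* MPIDR_AFFLVL1_VAL(mpidr) == 1, MPIDR_AFF_ID(mpidr, 2) == 0 */
	return (unsigned int)MPIDR_AFFLVL0_VAL(mpidr);  /* == 2 */
}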
-
-/*
- * An invalid MPID. This value can be used by functions that return an MPID to
- * indicate an error.
- */
-#define INVALID_MPID           U(0xFFFFFFFF)
-
-/*******************************************************************************
- * Definitions for CPU system register interface to GICv3
- ******************************************************************************/
-#define ICC_IGRPEN1_EL1                S3_0_C12_C12_7
-#define ICC_SGI1R              S3_0_C12_C11_5
-#define ICC_SRE_EL1            S3_0_C12_C12_5
-#define ICC_SRE_EL2            S3_4_C12_C9_5
-#define ICC_SRE_EL3            S3_6_C12_C12_5
-#define ICC_CTLR_EL1           S3_0_C12_C12_4
-#define ICC_CTLR_EL3           S3_6_C12_C12_4
-#define ICC_PMR_EL1            S3_0_C4_C6_0
-#define ICC_RPR_EL1            S3_0_C12_C11_3
-#define ICC_IGRPEN1_EL3                S3_6_C12_C12_7
-#define ICC_IGRPEN0_EL1                S3_0_C12_C12_6
-#define ICC_HPPIR0_EL1         S3_0_C12_C8_2
-#define ICC_HPPIR1_EL1         S3_0_C12_C12_2
-#define ICC_IAR0_EL1           S3_0_C12_C8_0
-#define ICC_IAR1_EL1           S3_0_C12_C12_0
-#define ICC_EOIR0_EL1          S3_0_C12_C8_1
-#define ICC_EOIR1_EL1          S3_0_C12_C12_1
-#define ICC_SGI0R_EL1          S3_0_C12_C11_7
-
-/*******************************************************************************
- * Generic timer memory mapped registers & offsets
- ******************************************************************************/
-#define CNTCR_OFF                      U(0x000)
-#define CNTFID_OFF                     U(0x020)
-
-#define CNTCR_EN                       (U(1) << 0)
-#define CNTCR_HDBG                     (U(1) << 1)
-#define CNTCR_FCREQ(x)                 ((x) << 8)
-
-/*******************************************************************************
- * System register bit definitions
- ******************************************************************************/
-/* CLIDR definitions */
-#define LOUIS_SHIFT            U(21)
-#define LOC_SHIFT              U(24)
-#define CLIDR_FIELD_WIDTH      U(3)
-
-/* CSSELR definitions */
-#define LEVEL_SHIFT            U(1)
-
-/* Data cache set/way op type defines */
-#define DCISW                  U(0x0)
-#define DCCISW                 U(0x1)
-#define DCCSW                  U(0x2)
-
-/* ID_AA64PFR0_EL1 definitions */
-#define ID_AA64PFR0_EL0_SHIFT  U(0)
-#define ID_AA64PFR0_EL1_SHIFT  U(4)
-#define ID_AA64PFR0_EL2_SHIFT  U(8)
-#define ID_AA64PFR0_EL3_SHIFT  U(12)
-#define ID_AA64PFR0_AMU_SHIFT  U(44)
-#define ID_AA64PFR0_AMU_LENGTH U(4)
-#define ID_AA64PFR0_AMU_MASK   ULL(0xf)
-#define ID_AA64PFR0_ELX_MASK   ULL(0xf)
-#define ID_AA64PFR0_SVE_SHIFT  U(32)
-#define ID_AA64PFR0_SVE_MASK   ULL(0xf)
-#define ID_AA64PFR0_SVE_LENGTH U(4)
-#define ID_AA64PFR0_MPAM_SHIFT U(40)
-#define ID_AA64PFR0_MPAM_MASK  ULL(0xf)
-#define ID_AA64PFR0_DIT_SHIFT  U(48)
-#define ID_AA64PFR0_DIT_MASK   ULL(0xf)
-#define ID_AA64PFR0_DIT_LENGTH U(4)
-#define ID_AA64PFR0_DIT_SUPPORTED      U(1)
-#define ID_AA64PFR0_CSV2_SHIFT U(56)
-#define ID_AA64PFR0_CSV2_MASK  ULL(0xf)
-#define ID_AA64PFR0_CSV2_LENGTH        U(4)
-
-/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
-#define ID_AA64DFR0_PMS_SHIFT  U(32)
-#define ID_AA64DFR0_PMS_LENGTH U(4)
-#define ID_AA64DFR0_PMS_MASK   ULL(0xf)
-
-#define EL_IMPL_NONE           ULL(0)
-#define EL_IMPL_A64ONLY                ULL(1)
-#define EL_IMPL_A64_A32                ULL(2)
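A sketch of how the ELx fields are consumed; read_id_aa64pfr0_el1() is assumed to be one of the generated AArch64 sysreg accessors:

static inline int el2_implemented_sketch(void)
{
	uint64_t el2 = (read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT) &
		       ID_AA64PFR0_ELX_MASK;

	/* EL_IMPL_A64_A32 additionally allows AArch32 at EL2. */
	return el2 != EL_IMPL_NONE;
}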
-
-#define ID_AA64PFR0_GIC_SHIFT  U(24)
-#define ID_AA64PFR0_GIC_WIDTH  U(4)
-#define ID_AA64PFR0_GIC_MASK   ((ULL(1) << ID_AA64PFR0_GIC_WIDTH) - ULL(1))
-
-/* ID_AA64MMFR0_EL1 definitions */
-#define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
-#define ID_AA64MMFR0_EL1_PARANGE_MASK  ULL(0xf)
-
-/* ID_AA64ISAR1_EL1 definitions */
-#define ID_AA64ISAR1_GPI_SHIFT U(28)
-#define ID_AA64ISAR1_GPI_WIDTH U(4)
-#define ID_AA64ISAR1_GPA_SHIFT U(24)
-#define ID_AA64ISAR1_GPA_WIDTH U(4)
-#define ID_AA64ISAR1_API_SHIFT U(8)
-#define ID_AA64ISAR1_API_WIDTH U(4)
-#define ID_AA64ISAR1_APA_SHIFT U(4)
-#define ID_AA64ISAR1_APA_WIDTH U(4)
-
-#define ID_AA64ISAR1_GPI_MASK \
-       (((ULL(1) << ID_AA64ISAR1_GPI_WIDTH) - ULL(1)) << ID_AA64ISAR1_GPI_SHIFT)
-#define ID_AA64ISAR1_GPA_MASK \
-       (((ULL(1) << ID_AA64ISAR1_GPA_WIDTH) - ULL(1)) << ID_AA64ISAR1_GPA_SHIFT)
-#define ID_AA64ISAR1_API_MASK \
-       (((ULL(1) << ID_AA64ISAR1_API_WIDTH) - ULL(1)) << ID_AA64ISAR1_API_SHIFT)
-#define ID_AA64ISAR1_APA_MASK \
-       (((ULL(1) << ID_AA64ISAR1_APA_WIDTH) - ULL(1)) << ID_AA64ISAR1_APA_SHIFT)
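A hedged sketch combining the four masks to detect any form of pointer authentication; the accessor name is an assumption:

static inline int pauth_present_sketch(void)
{
	uint64_t isar1 = read_id_aa64isar1_el1();       /* assumed accessor */

	/* Non-zero means architected or IMPLEMENTATION DEFINED PAuth. */
	return (isar1 & (ID_AA64ISAR1_GPI_MASK | ID_AA64ISAR1_GPA_MASK |
			 ID_AA64ISAR1_API_MASK | ID_AA64ISAR1_APA_MASK)) != 0ULL;
}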
-
-#define PARANGE_0000   U(32)
-#define PARANGE_0001   U(36)
-#define PARANGE_0010   U(40)
-#define PARANGE_0011   U(42)
-#define PARANGE_0100   U(44)
-#define PARANGE_0101   U(48)
-#define PARANGE_0110   U(52)
-
-#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT          U(28)
-#define ID_AA64MMFR0_EL1_TGRAN4_MASK           ULL(0xf)
-#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED      ULL(0x0)
-#define ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED  ULL(0xf)
-
-#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT         U(24)
-#define ID_AA64MMFR0_EL1_TGRAN64_MASK          ULL(0xf)
-#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED     ULL(0x0)
-#define ID_AA64MMFR0_EL1_TGRAN64_NOT_SUPPORTED ULL(0xf)
-
-#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT         U(20)
-#define ID_AA64MMFR0_EL1_TGRAN16_MASK          ULL(0xf)
-#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED     ULL(0x1)
-#define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED ULL(0x0)
-
-/* ID_AA64PFR1_EL1 definitions */
-#define ID_AA64PFR1_EL1_SSBS_SHIFT     U(4)
-#define ID_AA64PFR1_EL1_SSBS_MASK      ULL(0xf)
-
-#define SSBS_UNAVAILABLE       ULL(0)  /* No architectural SSBS support */
-
-/* ID_PFR1_EL1 definitions */
-#define ID_PFR1_VIRTEXT_SHIFT  U(12)
-#define ID_PFR1_VIRTEXT_MASK   U(0xf)
-#define GET_VIRT_EXT(id)       (((id) >> ID_PFR1_VIRTEXT_SHIFT) \
-                                & ID_PFR1_VIRTEXT_MASK)
-
-/* SCTLR definitions */
-#define SCTLR_EL2_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
-                        (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
-                        (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
-
-#define SCTLR_EL1_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
-                        (U(1) << 22) | (U(1) << 20) | (U(1) << 11))
-#define SCTLR_AARCH32_EL1_RES1 \
-                       ((U(1) << 23) | (U(1) << 22) | (U(1) << 11) | \
-                        (U(1) << 4) | (U(1) << 3))
-
-#define SCTLR_EL3_RES1 ((U(1) << 29) | (U(1) << 28) | (U(1) << 23) | \
-                       (U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
-                       (U(1) << 11) | (U(1) << 5) | (U(1) << 4))
-
-#define SCTLR_M_BIT            (ULL(1) << 0)
-#define SCTLR_A_BIT            (ULL(1) << 1)
-#define SCTLR_C_BIT            (ULL(1) << 2)
-#define SCTLR_SA_BIT           (ULL(1) << 3)
-#define SCTLR_SA0_BIT          (ULL(1) << 4)
-#define SCTLR_CP15BEN_BIT      (ULL(1) << 5)
-#define SCTLR_ITD_BIT          (ULL(1) << 7)
-#define SCTLR_SED_BIT          (ULL(1) << 8)
-#define SCTLR_UMA_BIT          (ULL(1) << 9)
-#define SCTLR_I_BIT            (ULL(1) << 12)
-#define SCTLR_V_BIT            (ULL(1) << 13)
-#define SCTLR_DZE_BIT          (ULL(1) << 14)
-#define SCTLR_UCT_BIT          (ULL(1) << 15)
-#define SCTLR_NTWI_BIT         (ULL(1) << 16)
-#define SCTLR_NTWE_BIT         (ULL(1) << 18)
-#define SCTLR_WXN_BIT          (ULL(1) << 19)
-#define SCTLR_UWXN_BIT         (ULL(1) << 20)
-#define SCTLR_E0E_BIT          (ULL(1) << 24)
-#define SCTLR_EE_BIT           (ULL(1) << 25)
-#define SCTLR_UCI_BIT          (ULL(1) << 26)
-#define SCTLR_TRE_BIT          (ULL(1) << 28)
-#define SCTLR_AFE_BIT          (ULL(1) << 29)
-#define SCTLR_TE_BIT           (ULL(1) << 30)
-#define SCTLR_DSSBS_BIT                (ULL(1) << 44)
-#define SCTLR_RESET_VAL                SCTLR_EL3_RES1
-
-/* CPACR_EL1 definitions */
-#define CPACR_EL1_FPEN(x)      ((x) << 20)
-#define CPACR_EL1_FP_TRAP_EL0  U(0x1)
-#define CPACR_EL1_FP_TRAP_ALL  U(0x2)
-#define CPACR_EL1_FP_TRAP_NONE U(0x3)
-
-/* SCR definitions */
-#define SCR_RES1_BITS          ((U(1) << 4) | (U(1) << 5))
-#define SCR_FIEN_BIT           (U(1) << 21)
-#define SCR_API_BIT            (U(1) << 17)
-#define SCR_APK_BIT            (U(1) << 16)
-#define SCR_TWE_BIT            (U(1) << 13)
-#define SCR_TWI_BIT            (U(1) << 12)
-#define SCR_ST_BIT             (U(1) << 11)
-#define SCR_RW_BIT             (U(1) << 10)
-#define SCR_SIF_BIT            (U(1) << 9)
-#define SCR_HCE_BIT            (U(1) << 8)
-#define SCR_SMD_BIT            (U(1) << 7)
-#define SCR_EA_BIT             (U(1) << 3)
-#define SCR_FIQ_BIT            (U(1) << 2)
-#define SCR_IRQ_BIT            (U(1) << 1)
-#define SCR_NS_BIT             (U(1) << 0)
-#define SCR_VALID_BIT_MASK     U(0x2f8f)
-#define SCR_RESET_VAL          SCR_RES1_BITS
-
-/* MDCR_EL3 definitions */
-#define MDCR_SPD32(x)          ((x) << 14)
-#define MDCR_SPD32_LEGACY      U(0x0)
-#define MDCR_SPD32_DISABLE     U(0x2)
-#define MDCR_SPD32_ENABLE      U(0x3)
-#define MDCR_SDD_BIT           (U(1) << 16)
-#define MDCR_NSPB(x)           ((x) << 12)
-#define MDCR_NSPB_EL1          U(0x3)
-#define MDCR_TDOSA_BIT         (U(1) << 10)
-#define MDCR_TDA_BIT           (U(1) << 9)
-#define MDCR_TPM_BIT           (U(1) << 6)
-#define MDCR_EL3_RESET_VAL     U(0x0)
-
-/* MDCR_EL2 definitions */
-#define MDCR_EL2_TPMS          (U(1) << 14)
-#define MDCR_EL2_E2PB(x)       ((x) << 12)
-#define MDCR_EL2_E2PB_EL1      U(0x3)
-#define MDCR_EL2_TDRA_BIT      (U(1) << 11)
-#define MDCR_EL2_TDOSA_BIT     (U(1) << 10)
-#define MDCR_EL2_TDA_BIT       (U(1) << 9)
-#define MDCR_EL2_TDE_BIT       (U(1) << 8)
-#define MDCR_EL2_HPME_BIT      (U(1) << 7)
-#define MDCR_EL2_TPM_BIT       (U(1) << 6)
-#define MDCR_EL2_TPMCR_BIT     (U(1) << 5)
-#define MDCR_EL2_RESET_VAL     U(0x0)
-
-/* HSTR_EL2 definitions */
-#define HSTR_EL2_RESET_VAL     U(0x0)
-#define HSTR_EL2_T_MASK                U(0xff)
-
-/* CNTHP_CTL_EL2 definitions */
-#define CNTHP_CTL_ENABLE_BIT   (U(1) << 0)
-#define CNTHP_CTL_RESET_VAL    U(0x0)
-
-/* VTTBR_EL2 definitions */
-#define VTTBR_RESET_VAL                ULL(0x0)
-#define VTTBR_VMID_MASK                ULL(0xff)
-#define VTTBR_VMID_SHIFT       U(48)
-#define VTTBR_BADDR_MASK       ULL(0xffffffffffff)
-#define VTTBR_BADDR_SHIFT      U(0)
-
-/* HCR definitions */
-#define HCR_API_BIT            (ULL(1) << 41)
-#define HCR_APK_BIT            (ULL(1) << 40)
-#define HCR_TGE_BIT            (ULL(1) << 27)
-#define HCR_RW_SHIFT           U(31)
-#define HCR_RW_BIT             (ULL(1) << HCR_RW_SHIFT)
-#define HCR_AMO_BIT            (ULL(1) << 5)
-#define HCR_IMO_BIT            (ULL(1) << 4)
-#define HCR_FMO_BIT            (ULL(1) << 3)
-
-/* ISR definitions */
-#define ISR_A_SHIFT            U(8)
-#define ISR_I_SHIFT            U(7)
-#define ISR_F_SHIFT            U(6)
-
-/* CNTHCTL_EL2 definitions */
-#define CNTHCTL_RESET_VAL      U(0x0)
-#define EVNTEN_BIT             (U(1) << 2)
-#define EL1PCEN_BIT            (U(1) << 1)
-#define EL1PCTEN_BIT           (U(1) << 0)
-
-/* CNTKCTL_EL1 definitions */
-#define EL0PTEN_BIT            (U(1) << 9)
-#define EL0VTEN_BIT            (U(1) << 8)
-#define EL0PCTEN_BIT           (U(1) << 0)
-#define EL0VCTEN_BIT           (U(1) << 1)
-#define EVNTEN_BIT             (U(1) << 2)
-#define EVNTDIR_BIT            (U(1) << 3)
-#define EVNTI_SHIFT            U(4)
-#define EVNTI_MASK             U(0xf)
-
-/* CPTR_EL3 definitions */
-#define TCPAC_BIT              (U(1) << 31)
-#define TAM_BIT                        (U(1) << 30)
-#define TTA_BIT                        (U(1) << 20)
-#define TFP_BIT                        (U(1) << 10)
-#define CPTR_EZ_BIT            (U(1) << 8)
-#define CPTR_EL3_RESET_VAL     U(0x0)
-
-/* CPTR_EL2 definitions */
-#define CPTR_EL2_RES1          ((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
-#define CPTR_EL2_TCPAC_BIT     (U(1) << 31)
-#define CPTR_EL2_TAM_BIT       (U(1) << 30)
-#define CPTR_EL2_TTA_BIT       (U(1) << 20)
-#define CPTR_EL2_TFP_BIT       (U(1) << 10)
-#define CPTR_EL2_TZ_BIT                (U(1) << 8)
-#define CPTR_EL2_RESET_VAL     CPTR_EL2_RES1
-
-/* CPSR/SPSR definitions */
-#define DAIF_FIQ_BIT           (U(1) << 0)
-#define DAIF_IRQ_BIT           (U(1) << 1)
-#define DAIF_ABT_BIT           (U(1) << 2)
-#define DAIF_DBG_BIT           (U(1) << 3)
-#define SPSR_DAIF_SHIFT                U(6)
-#define SPSR_DAIF_MASK         U(0xf)
-
-#define SPSR_AIF_SHIFT         U(6)
-#define SPSR_AIF_MASK          U(0x7)
-
-#define SPSR_E_SHIFT           U(9)
-#define SPSR_E_MASK            U(0x1)
-#define SPSR_E_LITTLE          U(0x0)
-#define SPSR_E_BIG             U(0x1)
-
-#define SPSR_T_SHIFT           U(5)
-#define SPSR_T_MASK            U(0x1)
-#define SPSR_T_ARM             U(0x0)
-#define SPSR_T_THUMB           U(0x1)
-
-#define SPSR_M_SHIFT           U(4)
-#define SPSR_M_MASK            U(0x1)
-#define SPSR_M_AARCH64         U(0x0)
-#define SPSR_M_AARCH32         U(0x1)
-
-#define DISABLE_ALL_EXCEPTIONS \
-               (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
-
-#define DISABLE_INTERRUPTS     (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
-
-/*
- * RMR_EL3 definitions
- */
-#define RMR_EL3_RR_BIT         (U(1) << 1)
-#define RMR_EL3_AA64_BIT       (U(1) << 0)
-
-/*
- * HI-VECTOR address for AArch32 state
- */
-#define HI_VECTOR_BASE         U(0xFFFF0000)
-
-/*
- * TCR definitions
- */
-#define TCR_EL3_RES1           ((ULL(1) << 31) | (ULL(1) << 23))
-#define TCR_EL2_RES1           ((ULL(1) << 31) | (ULL(1) << 23))
-#define TCR_EL1_IPS_SHIFT      U(32)
-#define TCR_EL2_PS_SHIFT       U(16)
-#define TCR_EL3_PS_SHIFT       U(16)
-
-#define TCR_TxSZ_MIN           ULL(16)
-#define TCR_TxSZ_MAX           ULL(39)
-
-/* (internal) physical address size bits in EL3/EL1 */
-#define TCR_PS_BITS_4GB                ULL(0x0)
-#define TCR_PS_BITS_64GB       ULL(0x1)
-#define TCR_PS_BITS_1TB                ULL(0x2)
-#define TCR_PS_BITS_4TB                ULL(0x3)
-#define TCR_PS_BITS_16TB       ULL(0x4)
-#define TCR_PS_BITS_256TB      ULL(0x5)
-
-#define ADDR_MASK_48_TO_63     ULL(0xFFFF000000000000)
-#define ADDR_MASK_44_TO_47     ULL(0x0000F00000000000)
-#define ADDR_MASK_42_TO_43     ULL(0x00000C0000000000)
-#define ADDR_MASK_40_TO_41     ULL(0x0000030000000000)
-#define ADDR_MASK_36_TO_39     ULL(0x000000F000000000)
-#define ADDR_MASK_32_TO_35     ULL(0x0000000F00000000)
-
-#define TCR_RGN_INNER_NC       (ULL(0x0) << 8)
-#define TCR_RGN_INNER_WBA      (ULL(0x1) << 8)
-#define TCR_RGN_INNER_WT       (ULL(0x2) << 8)
-#define TCR_RGN_INNER_WBNA     (ULL(0x3) << 8)
-
-#define TCR_RGN_OUTER_NC       (ULL(0x0) << 10)
-#define TCR_RGN_OUTER_WBA      (ULL(0x1) << 10)
-#define TCR_RGN_OUTER_WT       (ULL(0x2) << 10)
-#define TCR_RGN_OUTER_WBNA     (ULL(0x3) << 10)
-
-#define TCR_SH_NON_SHAREABLE   (ULL(0x0) << 12)
-#define TCR_SH_OUTER_SHAREABLE (ULL(0x2) << 12)
-#define TCR_SH_INNER_SHAREABLE (ULL(0x3) << 12)
-
-#define TCR_TG0_SHIFT          U(14)
-#define TCR_TG0_MASK           ULL(3)
-#define TCR_TG0_4K             (ULL(0) << TCR_TG0_SHIFT)
-#define TCR_TG0_64K            (ULL(1) << TCR_TG0_SHIFT)
-#define TCR_TG0_16K            (ULL(2) << TCR_TG0_SHIFT)
-
-#define TCR_EPD0_BIT           (ULL(1) << 7)
-#define TCR_EPD1_BIT           (ULL(1) << 23)
-
-#define MODE_SP_SHIFT          U(0x0)
-#define MODE_SP_MASK           U(0x1)
-#define MODE_SP_EL0            U(0x0)
-#define MODE_SP_ELX            U(0x1)
-
-#define MODE_RW_SHIFT          U(0x4)
-#define MODE_RW_MASK           U(0x1)
-#define MODE_RW_64             U(0x0)
-#define MODE_RW_32             U(0x1)
-
-#define MODE_EL_SHIFT          U(0x2)
-#define MODE_EL_MASK           U(0x3)
-#define MODE_EL3               U(0x3)
-#define MODE_EL2               U(0x2)
-#define MODE_EL1               U(0x1)
-#define MODE_EL0               U(0x0)
-
-#define MODE32_SHIFT           U(0)
-#define MODE32_MASK            U(0xf)
-#define MODE32_usr             U(0x0)
-#define MODE32_fiq             U(0x1)
-#define MODE32_irq             U(0x2)
-#define MODE32_svc             U(0x3)
-#define MODE32_mon             U(0x6)
-#define MODE32_abt             U(0x7)
-#define MODE32_hyp             U(0xa)
-#define MODE32_und             U(0xb)
-#define MODE32_sys             U(0xf)
-
-#define GET_RW(mode)           (((mode) >> MODE_RW_SHIFT) & MODE_RW_MASK)
-#define GET_EL(mode)           (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
-#define GET_SP(mode)           (((mode) >> MODE_SP_SHIFT) & MODE_SP_MASK)
-#define GET_M32(mode)          (((mode) >> MODE32_SHIFT) & MODE32_MASK)
-
-#define SPSR_64(el, sp, daif)                          \
-       ((MODE_RW_64 << MODE_RW_SHIFT) |                \
-       (((el) & MODE_EL_MASK) << MODE_EL_SHIFT) |      \
-       (((sp) & MODE_SP_MASK) << MODE_SP_SHIFT) |      \
-       (((daif) & SPSR_DAIF_MASK) << SPSR_DAIF_SHIFT))
-
-#define SPSR_MODE32(mode, isa, endian, aif)            \
-       ((MODE_RW_32 << MODE_RW_SHIFT) |                \
-       (((mode) & MODE32_MASK) << MODE32_SHIFT) |      \
-       (((isa) & SPSR_T_MASK) << SPSR_T_SHIFT) |       \
-       (((endian) & SPSR_E_MASK) << SPSR_E_SHIFT) |    \
-       (((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT))
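
These two macros are how the saved program status is typically composed before an exception return into a lower EL. A minimal sketch, with illustrative target states:

    /* Sketch: SPSR for entry into AArch64 EL1 on SP_EL1, all exceptions
     * masked. */
    uint32_t spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);

    /* Sketch: SPSR for entry into AArch32 Supervisor mode, ARM ISA,
     * little-endian; SPSR_AIF_MASK keeps only the A/I/F bits. */
    uint32_t spsr32 = SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
                                  DISABLE_ALL_EXCEPTIONS);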
-
-/*
- * TTBR Definitions
- */
-#define TTBR_CNP_BIT           ULL(0x1)
-
-/*
- * CTR_EL0 definitions
- */
-#define CTR_CWG_SHIFT          U(24)
-#define CTR_CWG_MASK           U(0xf)
-#define CTR_ERG_SHIFT          U(20)
-#define CTR_ERG_MASK           U(0xf)
-#define CTR_DMINLINE_SHIFT     U(16)
-#define CTR_DMINLINE_MASK      U(0xf)
-#define CTR_L1IP_SHIFT         U(14)
-#define CTR_L1IP_MASK          U(0x3)
-#define CTR_IMINLINE_SHIFT     U(0)
-#define CTR_IMINLINE_MASK      U(0xf)
-
-#define MAX_CACHE_LINE_SIZE    U(0x800) /* 2KB */
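
CTR_EL0.DminLine holds the log2 of the smallest data cache line size in words, so the size in bytes is recovered as below (a sketch; read_ctr_el0() is the accessor generated in arch_helpers.h):

    /* Sketch: smallest D-cache line size in bytes = 4 << DminLine. */
    static inline unsigned int dcache_line_size(void)
    {
            return 4U << ((read_ctr_el0() >> CTR_DMINLINE_SHIFT) &
                          CTR_DMINLINE_MASK);
    }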
-
-/* Physical timer control register bit field shifts and masks */
-#define CNTP_CTL_ENABLE_SHIFT   U(0)
-#define CNTP_CTL_IMASK_SHIFT    U(1)
-#define CNTP_CTL_ISTATUS_SHIFT  U(2)
-
-#define CNTP_CTL_ENABLE_MASK    U(1)
-#define CNTP_CTL_IMASK_MASK     U(1)
-#define CNTP_CTL_ISTATUS_MASK   U(1)
-
-/* Exception Syndrome register (ESR) field definitions */
-#define ESR_EC_SHIFT                   U(26)
-#define ESR_EC_MASK                    U(0x3f)
-#define ESR_EC_LENGTH                  U(6)
-#define EC_UNKNOWN                     U(0x0)
-#define EC_WFE_WFI                     U(0x1)
-#define EC_AARCH32_CP15_MRC_MCR                U(0x3)
-#define EC_AARCH32_CP15_MRRC_MCRR      U(0x4)
-#define EC_AARCH32_CP14_MRC_MCR                U(0x5)
-#define EC_AARCH32_CP14_LDC_STC                U(0x6)
-#define EC_FP_SIMD                     U(0x7)
-#define EC_AARCH32_CP10_MRC            U(0x8)
-#define EC_AARCH32_CP14_MRRC_MCRR      U(0xc)
-#define EC_ILLEGAL                     U(0xe)
-#define EC_AARCH32_SVC                 U(0x11)
-#define EC_AARCH32_HVC                 U(0x12)
-#define EC_AARCH32_SMC                 U(0x13)
-#define EC_AARCH64_SVC                 U(0x15)
-#define EC_AARCH64_HVC                 U(0x16)
-#define EC_AARCH64_SMC                 U(0x17)
-#define EC_AARCH64_SYS                 U(0x18)
-#define EC_IABORT_LOWER_EL             U(0x20)
-#define EC_IABORT_CUR_EL               U(0x21)
-#define EC_PC_ALIGN                    U(0x22)
-#define EC_DABORT_LOWER_EL             U(0x24)
-#define EC_DABORT_CUR_EL               U(0x25)
-#define EC_SP_ALIGN                    U(0x26)
-#define EC_AARCH32_FP                  U(0x28)
-#define EC_AARCH64_FP                  U(0x2c)
-#define EC_SERROR                      U(0x2f)
-
-/*
- * External Abort bit in Instruction and Data Aborts synchronous exception
- * syndromes.
- */
-#define ESR_ISS_EABORT_EA_BIT          U(9)
-
-#define EC_BITS(x)                     (((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
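
Inside a synchronous exception handler, EC_BITS() is what classifies the trap. A minimal sketch (read_esr_el3() is generated in arch_helpers.h):

    /* Sketch: detect an SMC issued from AArch64 state. */
    u_register_t esr = read_esr_el3();

    if (EC_BITS(esr) == EC_AARCH64_SMC) {
            /* Handle the SMC using the saved context. */
    }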
-
-/* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
-#define RMR_RESET_REQUEST_SHIFT        U(0x1)
-#define RMR_WARM_RESET_CPU             (U(1) << RMR_RESET_REQUEST_SHIFT)
-
-/*******************************************************************************
- * Definitions of register offsets, fields and macros for CPU system
- * instructions.
- ******************************************************************************/
-
-#define TLBI_ADDR_SHIFT                U(12)
-#define TLBI_ADDR_MASK         ULL(0x00000FFFFFFFFFFF)
-#define TLBI_ADDR(x)           (((x) >> TLBI_ADDR_SHIFT) & TLBI_ADDR_MASK)
-
-/*******************************************************************************
- * Definitions of register offsets and fields in the CNTCTLBase Frame of the
- * system level implementation of the Generic Timer.
- ******************************************************************************/
-#define CNTCTLBASE_CNTFRQ      U(0x0)
-#define CNTNSAR                        U(0x4)
-#define CNTNSAR_NS_SHIFT(x)    (x)
-
-#define CNTACR_BASE(x)         (U(0x40) + ((x) << 2))
-#define CNTACR_RPCT_SHIFT      U(0x0)
-#define CNTACR_RVCT_SHIFT      U(0x1)
-#define CNTACR_RFRQ_SHIFT      U(0x2)
-#define CNTACR_RVOFF_SHIFT     U(0x3)
-#define CNTACR_RWVT_SHIFT      U(0x4)
-#define CNTACR_RWPT_SHIFT      U(0x5)
-
-/*******************************************************************************
- * Definitions of register offsets and fields in the CNTBaseN Frame of the
- * system level implementation of the Generic Timer.
- ******************************************************************************/
-/* Physical Count register. */
-#define CNTPCT_LO              U(0x0)
-/* Counter Frequency register. */
-#define CNTBASEN_CNTFRQ                U(0x10)
-/* Physical Timer CompareValue register. */
-#define CNTP_CVAL_LO           U(0x20)
-/* Physical Timer Control register. */
-#define CNTP_CTL               U(0x2c)
-
-/* PMCR_EL0 definitions */
-#define PMCR_EL0_RESET_VAL     U(0x0)
-#define PMCR_EL0_N_SHIFT       U(11)
-#define PMCR_EL0_N_MASK                U(0x1f)
-#define PMCR_EL0_N_BITS                (PMCR_EL0_N_MASK << PMCR_EL0_N_SHIFT)
-#define PMCR_EL0_LC_BIT                (U(1) << 6)
-#define PMCR_EL0_DP_BIT                (U(1) << 5)
-#define PMCR_EL0_X_BIT         (U(1) << 4)
-#define PMCR_EL0_D_BIT         (U(1) << 3)
-
-/*******************************************************************************
- * Definitions for system register interface to SVE
- ******************************************************************************/
-#define ZCR_EL3                        S3_6_C1_C2_0
-#define ZCR_EL2                        S3_4_C1_C2_0
-
-/* ZCR_EL3 definitions */
-#define ZCR_EL3_LEN_MASK       U(0xf)
-
-/* ZCR_EL2 definitions */
-#define ZCR_EL2_LEN_MASK       U(0xf)
-
-/*******************************************************************************
- * Definitions of MAIR encodings for device and normal memory
- ******************************************************************************/
-/*
- * MAIR encodings for device memory attributes.
- */
-#define MAIR_DEV_nGnRnE                ULL(0x0)
-#define MAIR_DEV_nGnRE         ULL(0x4)
-#define MAIR_DEV_nGRE          ULL(0x8)
-#define MAIR_DEV_GRE           ULL(0xc)
-
-/*
- * MAIR encodings for normal memory attributes.
- *
- * Cache Policy
- *  WT:         Write Through
- *  WB:         Write Back
- *  NC:         Non-Cacheable
- *
- * Transient Hint
- *  NTR: Non-Transient
- *  TR:         Transient
- *
- * Allocation Policy
- *  RA:         Read Allocate
- *  WA:         Write Allocate
- *  RWA: Read and Write Allocate
- *  NA:         No Allocation
- */
-#define MAIR_NORM_WT_TR_WA     ULL(0x1)
-#define MAIR_NORM_WT_TR_RA     ULL(0x2)
-#define MAIR_NORM_WT_TR_RWA    ULL(0x3)
-#define MAIR_NORM_NC           ULL(0x4)
-#define MAIR_NORM_WB_TR_WA     ULL(0x5)
-#define MAIR_NORM_WB_TR_RA     ULL(0x6)
-#define MAIR_NORM_WB_TR_RWA    ULL(0x7)
-#define MAIR_NORM_WT_NTR_NA    ULL(0x8)
-#define MAIR_NORM_WT_NTR_WA    ULL(0x9)
-#define MAIR_NORM_WT_NTR_RA    ULL(0xa)
-#define MAIR_NORM_WT_NTR_RWA   ULL(0xb)
-#define MAIR_NORM_WB_NTR_NA    ULL(0xc)
-#define MAIR_NORM_WB_NTR_WA    ULL(0xd)
-#define MAIR_NORM_WB_NTR_RA    ULL(0xe)
-#define MAIR_NORM_WB_NTR_RWA   ULL(0xf)
-
-#define MAIR_NORM_OUTER_SHIFT  U(4)
-
-#define MAKE_MAIR_NORMAL_MEMORY(inner, outer)  \
-               ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
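
A sketch of the usual cacheable attribute built from these encodings (the macro name below is illustrative, not from this patch):

    /* Inner and outer write-back, non-transient, read/write-allocate:
     * evaluates to the familiar MAIR attribute 0xff. */
    #define MAIR_ATTR_NORMAL_WBRWA \
            MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)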
-
-/* PAR_EL1 fields */
-#define PAR_F_SHIFT    U(0)
-#define PAR_F_MASK     ULL(0x1)
-#define PAR_ADDR_SHIFT U(12)
-#define PAR_ADDR_MASK  (BIT(40) - ULL(1)) /* 40-bits-wide page address */
-
-/*******************************************************************************
- * Definitions for system register interface to SPE
- ******************************************************************************/
-#define PMBLIMITR_EL1          S3_0_C9_C10_0
-
-/*******************************************************************************
- * Definitions for system register interface to MPAM
- ******************************************************************************/
-#define MPAMIDR_EL1            S3_0_C10_C4_4
-#define MPAM2_EL2              S3_4_C10_C5_0
-#define MPAMHCR_EL2            S3_4_C10_C4_0
-#define MPAM3_EL3              S3_6_C10_C5_0
-
-/*******************************************************************************
- * Definitions for system register interface to AMU for ARMv8.4 onwards
- ******************************************************************************/
-#define AMCR_EL0               S3_3_C13_C2_0
-#define AMCFGR_EL0             S3_3_C13_C2_1
-#define AMCGCR_EL0             S3_3_C13_C2_2
-#define AMUSERENR_EL0          S3_3_C13_C2_3
-#define AMCNTENCLR0_EL0                S3_3_C13_C2_4
-#define AMCNTENSET0_EL0                S3_3_C13_C2_5
-#define AMCNTENCLR1_EL0                S3_3_C13_C3_0
-#define AMCNTENSET1_EL0                S3_3_C13_C3_1
-
-/* Activity Monitor Group 0 Event Counter Registers */
-#define AMEVCNTR00_EL0         S3_3_C13_C4_0
-#define AMEVCNTR01_EL0         S3_3_C13_C4_1
-#define AMEVCNTR02_EL0         S3_3_C13_C4_2
-#define AMEVCNTR03_EL0         S3_3_C13_C4_3
-
-/* Activity Monitor Group 0 Event Type Registers */
-#define AMEVTYPER00_EL0                S3_3_C13_C6_0
-#define AMEVTYPER01_EL0                S3_3_C13_C6_1
-#define AMEVTYPER02_EL0                S3_3_C13_C6_2
-#define AMEVTYPER03_EL0                S3_3_C13_C6_3
-
-/* Activity Monitor Group 1 Event Counter Registers */
-#define AMEVCNTR10_EL0         S3_3_C13_C12_0
-#define AMEVCNTR11_EL0         S3_3_C13_C12_1
-#define AMEVCNTR12_EL0         S3_3_C13_C12_2
-#define AMEVCNTR13_EL0         S3_3_C13_C12_3
-#define AMEVCNTR14_EL0         S3_3_C13_C12_4
-#define AMEVCNTR15_EL0         S3_3_C13_C12_5
-#define AMEVCNTR16_EL0         S3_3_C13_C12_6
-#define AMEVCNTR17_EL0         S3_3_C13_C12_7
-#define AMEVCNTR18_EL0         S3_3_C13_C13_0
-#define AMEVCNTR19_EL0         S3_3_C13_C13_1
-#define AMEVCNTR1A_EL0         S3_3_C13_C13_2
-#define AMEVCNTR1B_EL0         S3_3_C13_C13_3
-#define AMEVCNTR1C_EL0         S3_3_C13_C13_4
-#define AMEVCNTR1D_EL0         S3_3_C13_C13_5
-#define AMEVCNTR1E_EL0         S3_3_C13_C13_6
-#define AMEVCNTR1F_EL0         S3_3_C13_C13_7
-
-/* Activity Monitor Group 1 Event Type Registers */
-#define AMEVTYPER10_EL0                S3_3_C13_C14_0
-#define AMEVTYPER11_EL0                S3_3_C13_C14_1
-#define AMEVTYPER12_EL0                S3_3_C13_C14_2
-#define AMEVTYPER13_EL0                S3_3_C13_C14_3
-#define AMEVTYPER14_EL0                S3_3_C13_C14_4
-#define AMEVTYPER15_EL0                S3_3_C13_C14_5
-#define AMEVTYPER16_EL0                S3_3_C13_C14_6
-#define AMEVTYPER17_EL0                S3_3_C13_C14_7
-#define AMEVTYPER18_EL0                S3_3_C13_C15_0
-#define AMEVTYPER19_EL0                S3_3_C13_C15_1
-#define AMEVTYPER1A_EL0                S3_3_C13_C15_2
-#define AMEVTYPER1B_EL0                S3_3_C13_C15_3
-#define AMEVTYPER1C_EL0                S3_3_C13_C15_4
-#define AMEVTYPER1D_EL0                S3_3_C13_C15_5
-#define AMEVTYPER1E_EL0                S3_3_C13_C15_6
-#define AMEVTYPER1F_EL0                S3_3_C13_C15_7
-
-/* AMCGCR_EL0 definitions */
-#define AMCGCR_EL0_CG1NC_SHIFT U(8)
-#define AMCGCR_EL0_CG1NC_LENGTH        U(8)
-#define AMCGCR_EL0_CG1NC_MASK  U(0xff)
-
-/* MPAM register definitions */
-#define MPAM3_EL3_MPAMEN_BIT           (ULL(1) << 63)
-
-#define MPAMIDR_HAS_HCR_BIT            (ULL(1) << 17)
-
-/*******************************************************************************
- * RAS system registers
- ******************************************************************************/
-#define DISR_EL1               S3_0_C12_C1_1
-#define DISR_A_BIT             U(31)
-
-#define ERRIDR_EL1             S3_0_C5_C3_0
-#define ERRIDR_MASK            U(0xffff)
-
-#define ERRSELR_EL1            S3_0_C5_C3_1
-
-/* System register access to Standard Error Record registers */
-#define ERXFR_EL1              S3_0_C5_C4_0
-#define ERXCTLR_EL1            S3_0_C5_C4_1
-#define ERXSTATUS_EL1          S3_0_C5_C4_2
-#define ERXADDR_EL1            S3_0_C5_C4_3
-#define ERXPFGF_EL1            S3_0_C5_C4_4
-#define ERXPFGCTL_EL1          S3_0_C5_C4_5
-#define ERXPFGCDN_EL1          S3_0_C5_C4_6
-#define ERXMISC0_EL1           S3_0_C5_C5_0
-#define ERXMISC1_EL1           S3_0_C5_C5_1
-
-#define ERXCTLR_ED_BIT         (U(1) << 0)
-#define ERXCTLR_UE_BIT         (U(1) << 4)
-
-#define ERXPFGCTL_UC_BIT       (U(1) << 1)
-#define ERXPFGCTL_UEU_BIT      (U(1) << 2)
-#define ERXPFGCTL_CDEN_BIT     (U(1) << 31)
-
-/*******************************************************************************
- * Armv8.3 Pointer Authentication Registers
- ******************************************************************************/
-#define APGAKeyLo_EL1          S3_0_C2_C3_0
-
-/*******************************************************************************
- * Armv8.4 Data Independent Timing Registers
- ******************************************************************************/
-#define DIT                    S3_3_C4_C2_5
-#define DIT_BIT                        BIT(24)
-
-#endif /* ARCH_H */
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
deleted file mode 100644 (file)
index 7222b9d..0000000
+++ /dev/null
@@ -1,503 +0,0 @@
-/*
- * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ARCH_HELPERS_H
-#define ARCH_HELPERS_H
-
-#include <arch.h>
-#include <cdefs.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <string.h>
-
-/**********************************************************************
- * Macros which create inline functions to read or write CPU system
- * registers
- *********************************************************************/
-
-#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)             \
-static inline u_register_t read_ ## _name(void)                        \
-{                                                              \
-       u_register_t v;                                         \
-       __asm__ volatile ("mrs %0, " #_reg_name : "=r" (v));    \
-       return v;                                               \
-}
-
-#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)                    \
-static inline void write_ ## _name(u_register_t v)                     \
-{                                                                      \
-       __asm__ volatile ("msr " #_reg_name ", %0" : : "r" (v));        \
-}
-
-#define SYSREG_WRITE_CONST(reg_name, v)                                \
-       __asm__ volatile ("msr " #reg_name ", %0" : : "i" (v))
-
-/* Define read function for system register */
-#define DEFINE_SYSREG_READ_FUNC(_name)                         \
-       _DEFINE_SYSREG_READ_FUNC(_name, _name)
-
-/* Define read & write function for system register */
-#define DEFINE_SYSREG_RW_FUNCS(_name)                  \
-       _DEFINE_SYSREG_READ_FUNC(_name, _name)          \
-       _DEFINE_SYSREG_WRITE_FUNC(_name, _name)
-
-/* Define read & write function for renamed system register */
-#define DEFINE_RENAME_SYSREG_RW_FUNCS(_name, _reg_name)        \
-       _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)      \
-       _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
-
-/* Define read function for renamed system register */
-#define DEFINE_RENAME_SYSREG_READ_FUNC(_name, _reg_name)       \
-       _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)
-
-/* Define write function for renamed system register */
-#define DEFINE_RENAME_SYSREG_WRITE_FUNC(_name, _reg_name)      \
-       _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
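
Concretely, a single invocation such as DEFINE_SYSREG_RW_FUNCS(sctlr_el3) expands to the equivalent of:

    static inline u_register_t read_sctlr_el3(void)
    {
            u_register_t v;
            __asm__ volatile ("mrs %0, sctlr_el3" : "=r" (v));
            return v;
    }

    static inline void write_sctlr_el3(u_register_t v)
    {
            __asm__ volatile ("msr sctlr_el3, %0" : : "r" (v));
    }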
-
-/**********************************************************************
- * Macros to create inline functions for system instructions
- *********************************************************************/
-
-/* Define function for simple system instruction */
-#define DEFINE_SYSOP_FUNC(_op)                         \
-static inline void _op(void)                           \
-{                                                      \
-       __asm__ (#_op);                                 \
-}
-
-/* Define function for system instruction with type specifier */
-#define DEFINE_SYSOP_TYPE_FUNC(_op, _type)             \
-static inline void _op ## _type(void)                  \
-{                                                      \
-       __asm__ (#_op " " #_type);                      \
-}
-
-/* Define function for system instruction with register parameter */
-#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type)       \
-static inline void _op ## _type(uint64_t v)            \
-{                                                      \
-        __asm__ (#_op " " #_type ", %0" : : "r" (v));  \
-}
-
-/*******************************************************************************
- * TLB maintenance accessor prototypes
- ******************************************************************************/
-
-#if ERRATA_A57_813419
-/*
- * Define function for TLBI instruction with type specifier that implements
- * the workaround for erratum 813419 of Cortex-A57.
- */
-#define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(_type)\
-static inline void tlbi ## _type(void)                 \
-{                                                      \
-       __asm__("tlbi " #_type "\n"                     \
-               "dsb ish\n"                             \
-               "tlbi " #_type);                        \
-}
-
-/*
- * Define function for TLBI instruction with register parameter that implements
- * the workaround for erratum 813419 of Cortex-A57.
- */
-#define DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(_type) \
-static inline void tlbi ## _type(uint64_t v)                   \
-{                                                              \
-       __asm__("tlbi " #_type ", %0\n"                         \
-               "dsb ish\n"                                     \
-               "tlbi " #_type ", %0" : : "r" (v));             \
-}
-#endif /* ERRATA_A57_813419 */
-
-DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
-DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
-DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
-DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2is)
-#if ERRATA_A57_813419
-DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3)
-DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_FUNC(alle3is)
-#else
-DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3)
-DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3is)
-#endif
-DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
-
-DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaae1is)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vaale1is)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae2is)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale2is)
-#if ERRATA_A57_813419
-DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vae3is)
-DEFINE_TLBIOP_ERRATA_A57_813419_TYPE_PARAM_FUNC(vale3is)
-#else
-DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vae3is)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(tlbi, vale3is)
-#endif
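
Caller code typically pairs these accessors with TLBI_ADDR() and the barrier helpers defined further down in this header; a sketch (the wrapper name is illustrative):

    /* Sketch: invalidate the EL3 TLB entry for one VA, inner shareable,
     * and wait for the invalidation to complete. */
    static inline void tlbi_va_el3(uintptr_t va)
    {
            tlbivae3is(TLBI_ADDR(va));
            dsbish();
            isb();
    }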
-
-/*******************************************************************************
- * Cache maintenance accessor prototypes
- ******************************************************************************/
-DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, isw)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cisw)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, csw)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvac)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, ivac)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, civac)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvau)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, zva)
-
-/*******************************************************************************
- * Address translation accessor prototypes
- ******************************************************************************/
-DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1r)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1w)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0r)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0w)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e1r)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e2r)
-DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e3r)
-
-void flush_dcache_range(uintptr_t addr, size_t size);
-void clean_dcache_range(uintptr_t addr, size_t size);
-void inv_dcache_range(uintptr_t addr, size_t size);
-
-void dcsw_op_louis(u_register_t op_type);
-void dcsw_op_all(u_register_t op_type);
-
-void disable_mmu_el1(void);
-void disable_mmu_el3(void);
-void disable_mmu_icache_el1(void);
-void disable_mmu_icache_el3(void);
-
-/*******************************************************************************
- * Misc. accessor prototypes
- ******************************************************************************/
-
-#define write_daifclr(val) SYSREG_WRITE_CONST(daifclr, val)
-#define write_daifset(val) SYSREG_WRITE_CONST(daifset, val)
-
-DEFINE_SYSREG_RW_FUNCS(par_el1)
-DEFINE_SYSREG_READ_FUNC(id_pfr1_el1)
-DEFINE_SYSREG_READ_FUNC(id_aa64isar1_el1)
-DEFINE_SYSREG_READ_FUNC(id_aa64pfr0_el1)
-DEFINE_SYSREG_READ_FUNC(id_aa64dfr0_el1)
-DEFINE_SYSREG_READ_FUNC(CurrentEl)
-DEFINE_SYSREG_READ_FUNC(ctr_el0)
-DEFINE_SYSREG_RW_FUNCS(daif)
-DEFINE_SYSREG_RW_FUNCS(spsr_el1)
-DEFINE_SYSREG_RW_FUNCS(spsr_el2)
-DEFINE_SYSREG_RW_FUNCS(spsr_el3)
-DEFINE_SYSREG_RW_FUNCS(elr_el1)
-DEFINE_SYSREG_RW_FUNCS(elr_el2)
-DEFINE_SYSREG_RW_FUNCS(elr_el3)
-
-DEFINE_SYSOP_FUNC(wfi)
-DEFINE_SYSOP_FUNC(wfe)
-DEFINE_SYSOP_FUNC(sev)
-DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
-DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
-DEFINE_SYSOP_TYPE_FUNC(dmb, st)
-DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
-DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
-DEFINE_SYSOP_TYPE_FUNC(dsb, nsh)
-DEFINE_SYSOP_TYPE_FUNC(dsb, ishst)
-DEFINE_SYSOP_TYPE_FUNC(dmb, oshld)
-DEFINE_SYSOP_TYPE_FUNC(dmb, oshst)
-DEFINE_SYSOP_TYPE_FUNC(dmb, osh)
-DEFINE_SYSOP_TYPE_FUNC(dmb, nshld)
-DEFINE_SYSOP_TYPE_FUNC(dmb, nshst)
-DEFINE_SYSOP_TYPE_FUNC(dmb, nsh)
-DEFINE_SYSOP_TYPE_FUNC(dmb, ishld)
-DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
-DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
-DEFINE_SYSOP_FUNC(isb)
-
-static inline void enable_irq(void)
-{
-       /*
-        * The compiler memory barrier prevents the compiler from scheduling
-        * non-volatile memory accesses after the write to the register.
-        *
-        * This could happen if some initialization code issues non-volatile
-        * accesses to an area used by an interrupt handler, on the assumption
-        * that this is safe because interrupts are disabled at that point
-        * (according to program order). However, non-volatile accesses are
-        * not necessarily ordered in program order relative to volatile
-        * inline assembly statements (and volatile accesses).
-        */
-       COMPILER_BARRIER();
-       write_daifclr(DAIF_IRQ_BIT);
-       isb();
-}
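
A sketch of the hazard the barrier closes (handler_data is hypothetical):

    /* Without COMPILER_BARRIER(), the compiler could sink this plain
     * store past the daifclr write, so the newly unmasked IRQ handler
     * might observe a stale value. */
    handler_data.ready = 1;         /* non-volatile store */
    enable_irq();                   /* must stay after the store */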
-
-static inline void enable_fiq(void)
-{
-       COMPILER_BARRIER();
-       write_daifclr(DAIF_FIQ_BIT);
-       isb();
-}
-
-static inline void enable_serror(void)
-{
-       COMPILER_BARRIER();
-       write_daifclr(DAIF_ABT_BIT);
-       isb();
-}
-
-static inline void enable_debug_exceptions(void)
-{
-       COMPILER_BARRIER();
-       write_daifclr(DAIF_DBG_BIT);
-       isb();
-}
-
-static inline void disable_irq(void)
-{
-       COMPILER_BARRIER();
-       write_daifset(DAIF_IRQ_BIT);
-       isb();
-}
-
-static inline void disable_fiq(void)
-{
-       COMPILER_BARRIER();
-       write_daifset(DAIF_FIQ_BIT);
-       isb();
-}
-
-static inline void disable_serror(void)
-{
-       COMPILER_BARRIER();
-       write_daifset(DAIF_ABT_BIT);
-       isb();
-}
-
-static inline void disable_debug_exceptions(void)
-{
-       COMPILER_BARRIER();
-       write_daifset(DAIF_DBG_BIT);
-       isb();
-}
-
-#if !ERROR_DEPRECATED
-uint32_t get_afflvl_shift(uint32_t);
-uint32_t mpidr_mask_lower_afflvls(uint64_t, uint32_t);
-
-void __dead2 eret(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
-                 uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
-#endif
-void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
-                uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
-
-/*******************************************************************************
- * System register accessor prototypes
- ******************************************************************************/
-DEFINE_SYSREG_READ_FUNC(midr_el1)
-DEFINE_SYSREG_READ_FUNC(mpidr_el1)
-DEFINE_SYSREG_READ_FUNC(id_aa64mmfr0_el1)
-
-DEFINE_SYSREG_RW_FUNCS(scr_el3)
-DEFINE_SYSREG_RW_FUNCS(hcr_el2)
-
-DEFINE_SYSREG_RW_FUNCS(vbar_el1)
-DEFINE_SYSREG_RW_FUNCS(vbar_el2)
-DEFINE_SYSREG_RW_FUNCS(vbar_el3)
-
-DEFINE_SYSREG_RW_FUNCS(sctlr_el1)
-DEFINE_SYSREG_RW_FUNCS(sctlr_el2)
-DEFINE_SYSREG_RW_FUNCS(sctlr_el3)
-
-DEFINE_SYSREG_RW_FUNCS(actlr_el1)
-DEFINE_SYSREG_RW_FUNCS(actlr_el2)
-DEFINE_SYSREG_RW_FUNCS(actlr_el3)
-
-DEFINE_SYSREG_RW_FUNCS(esr_el1)
-DEFINE_SYSREG_RW_FUNCS(esr_el2)
-DEFINE_SYSREG_RW_FUNCS(esr_el3)
-
-DEFINE_SYSREG_RW_FUNCS(afsr0_el1)
-DEFINE_SYSREG_RW_FUNCS(afsr0_el2)
-DEFINE_SYSREG_RW_FUNCS(afsr0_el3)
-
-DEFINE_SYSREG_RW_FUNCS(afsr1_el1)
-DEFINE_SYSREG_RW_FUNCS(afsr1_el2)
-DEFINE_SYSREG_RW_FUNCS(afsr1_el3)
-
-DEFINE_SYSREG_RW_FUNCS(far_el1)
-DEFINE_SYSREG_RW_FUNCS(far_el2)
-DEFINE_SYSREG_RW_FUNCS(far_el3)
-
-DEFINE_SYSREG_RW_FUNCS(mair_el1)
-DEFINE_SYSREG_RW_FUNCS(mair_el2)
-DEFINE_SYSREG_RW_FUNCS(mair_el3)
-
-DEFINE_SYSREG_RW_FUNCS(amair_el1)
-DEFINE_SYSREG_RW_FUNCS(amair_el2)
-DEFINE_SYSREG_RW_FUNCS(amair_el3)
-
-DEFINE_SYSREG_READ_FUNC(rvbar_el1)
-DEFINE_SYSREG_READ_FUNC(rvbar_el2)
-DEFINE_SYSREG_READ_FUNC(rvbar_el3)
-
-DEFINE_SYSREG_RW_FUNCS(rmr_el1)
-DEFINE_SYSREG_RW_FUNCS(rmr_el2)
-DEFINE_SYSREG_RW_FUNCS(rmr_el3)
-
-DEFINE_SYSREG_RW_FUNCS(tcr_el1)
-DEFINE_SYSREG_RW_FUNCS(tcr_el2)
-DEFINE_SYSREG_RW_FUNCS(tcr_el3)
-
-DEFINE_SYSREG_RW_FUNCS(ttbr0_el1)
-DEFINE_SYSREG_RW_FUNCS(ttbr0_el2)
-DEFINE_SYSREG_RW_FUNCS(ttbr0_el3)
-
-DEFINE_SYSREG_RW_FUNCS(ttbr1_el1)
-
-DEFINE_SYSREG_RW_FUNCS(vttbr_el2)
-
-DEFINE_SYSREG_RW_FUNCS(cptr_el2)
-DEFINE_SYSREG_RW_FUNCS(cptr_el3)
-
-DEFINE_SYSREG_RW_FUNCS(cpacr_el1)
-DEFINE_SYSREG_RW_FUNCS(cntfrq_el0)
-DEFINE_SYSREG_RW_FUNCS(cnthp_ctl_el2)
-DEFINE_SYSREG_RW_FUNCS(cnthp_tval_el2)
-DEFINE_SYSREG_RW_FUNCS(cnthp_cval_el2)
-DEFINE_SYSREG_RW_FUNCS(cntps_ctl_el1)
-DEFINE_SYSREG_RW_FUNCS(cntps_tval_el1)
-DEFINE_SYSREG_RW_FUNCS(cntps_cval_el1)
-DEFINE_SYSREG_RW_FUNCS(cntp_ctl_el0)
-DEFINE_SYSREG_RW_FUNCS(cntp_tval_el0)
-DEFINE_SYSREG_RW_FUNCS(cntp_cval_el0)
-DEFINE_SYSREG_READ_FUNC(cntpct_el0)
-DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
-
-#define get_cntp_ctl_enable(x)  (((x) >> CNTP_CTL_ENABLE_SHIFT) & \
-                                       CNTP_CTL_ENABLE_MASK)
-#define get_cntp_ctl_imask(x)   (((x) >> CNTP_CTL_IMASK_SHIFT) & \
-                                       CNTP_CTL_IMASK_MASK)
-#define get_cntp_ctl_istatus(x) (((x) >> CNTP_CTL_ISTATUS_SHIFT) & \
-                                       CNTP_CTL_ISTATUS_MASK)
-
-#define set_cntp_ctl_enable(x)  ((x) |= (U(1) << CNTP_CTL_ENABLE_SHIFT))
-#define set_cntp_ctl_imask(x)   ((x) |= (U(1) << CNTP_CTL_IMASK_SHIFT))
-
-#define clr_cntp_ctl_enable(x)  ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
-#define clr_cntp_ctl_imask(x)   ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
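
A sketch of programming the EL1 secure physical timer with these helpers ("timeout_ticks" is a caller-chosen value):

    u_register_t ctl = read_cntps_ctl_el1();

    write_cntps_tval_el1(timeout_ticks);    /* arm the timer */
    set_cntp_ctl_enable(ctl);               /* enable it... */
    clr_cntp_ctl_imask(ctl);                /* ...with its interrupt unmasked */
    write_cntps_ctl_el1(ctl);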
-
-DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
-
-DEFINE_SYSREG_RW_FUNCS(cntvoff_el2)
-
-DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
-DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
-
-DEFINE_SYSREG_READ_FUNC(isr_el1)
-
-DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
-DEFINE_SYSREG_RW_FUNCS(mdcr_el3)
-DEFINE_SYSREG_RW_FUNCS(hstr_el2)
-DEFINE_SYSREG_RW_FUNCS(pmcr_el0)
-
-/* GICv3 System Registers */
-
-DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
-DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
-DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el3, ICC_SRE_EL3)
-DEFINE_RENAME_SYSREG_RW_FUNCS(icc_pmr_el1, ICC_PMR_EL1)
-DEFINE_RENAME_SYSREG_READ_FUNC(icc_rpr_el1, ICC_RPR_EL1)
-DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el3, ICC_IGRPEN1_EL3)
-DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen1_el1, ICC_IGRPEN1_EL1)
-DEFINE_RENAME_SYSREG_RW_FUNCS(icc_igrpen0_el1, ICC_IGRPEN0_EL1)
-DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir0_el1, ICC_HPPIR0_EL1)
-DEFINE_RENAME_SYSREG_READ_FUNC(icc_hppir1_el1, ICC_HPPIR1_EL1)
-DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar0_el1, ICC_IAR0_EL1)
-DEFINE_RENAME_SYSREG_READ_FUNC(icc_iar1_el1, ICC_IAR1_EL1)
-DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir0_el1, ICC_EOIR0_EL1)
-DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
-DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
-DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
-
-DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
-DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
-DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
-DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
-DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset1_el0, AMCNTENSET1_EL0)
-
-DEFINE_RENAME_SYSREG_READ_FUNC(mpamidr_el1, MPAMIDR_EL1)
-DEFINE_RENAME_SYSREG_RW_FUNCS(mpam3_el3, MPAM3_EL3)
-DEFINE_RENAME_SYSREG_RW_FUNCS(mpam2_el2, MPAM2_EL2)
-DEFINE_RENAME_SYSREG_RW_FUNCS(mpamhcr_el2, MPAMHCR_EL2)
-
-DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
-
-DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
-DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)
-
-DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
-DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
-
-DEFINE_RENAME_SYSREG_READ_FUNC(erxfr_el1, ERXFR_EL1)
-DEFINE_RENAME_SYSREG_RW_FUNCS(erxctlr_el1, ERXCTLR_EL1)
-DEFINE_RENAME_SYSREG_RW_FUNCS(erxstatus_el1, ERXSTATUS_EL1)
-DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
-DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
-DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
-
-/* Armv8.3 Pointer Authentication Registers */
-DEFINE_RENAME_SYSREG_RW_FUNCS(apgakeylo_el1, APGAKeyLo_EL1)
-
-#define IS_IN_EL(x) \
-       (GET_EL(read_CurrentEl()) == MODE_EL##x)
-
-#define IS_IN_EL1() IS_IN_EL(1)
-#define IS_IN_EL2() IS_IN_EL(2)
-#define IS_IN_EL3() IS_IN_EL(3)
-
-static inline unsigned int get_current_el(void)
-{
-       return GET_EL(read_CurrentEl());
-}
-
-/*
- * Check whether an EL is implemented, using the ID_AA64PFR0_EL1 fields.
- */
-static inline uint64_t el_implemented(unsigned int el)
-{
-       if (el > 3U) {
-               return EL_IMPL_NONE;
-       } else {
-               unsigned int shift = ID_AA64PFR0_EL1_SHIFT * el;
-
-               return (read_id_aa64pfr0_el1() >> shift) & ID_AA64PFR0_ELX_MASK;
-       }
-}
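
A usage sketch:

    /* Skip EL2 context initialisation when EL2 is absent; a return of
     * EL_IMPL_A64_A32 additionally indicates AArch32 support. */
    if (el_implemented(2U) != EL_IMPL_NONE) {
            /* ... set up the EL2 system registers ... */
    }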
-
-#if !ERROR_DEPRECATED
-#define EL_IMPLEMENTED(_el)    el_implemented(_el)
-#endif
-
-/* Previously defined accessor functions with incomplete register names */
-
-#define read_current_el()      read_CurrentEl()
-
-#define dsb()                  dsbsy()
-
-#define read_midr()            read_midr_el1()
-
-#define read_mpidr()           read_mpidr_el1()
-
-#define read_scr()             read_scr_el3()
-#define write_scr(_v)          write_scr_el3(_v)
-
-#define read_hcr()             read_hcr_el2()
-#define write_hcr(_v)          write_hcr_el2(_v)
-
-#define read_cpacr()           read_cpacr_el1()
-#define write_cpacr(_v)                write_cpacr_el1(_v)
-
-#endif /* ARCH_HELPERS_H */
diff --git a/include/lib/aarch64/setjmp.h b/include/lib/aarch64/setjmp.h
deleted file mode 100644 (file)
index bbfe1df..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SETJMP_H
-#define SETJMP_H
-
-#define JMP_CTX_X19    0x0
-#define JMP_CTX_X21    0x10
-#define JMP_CTX_X23    0x20
-#define JMP_CTX_X25    0x30
-#define JMP_CTX_X27    0x40
-#define JMP_CTX_X29    0x50
-#define JMP_CTX_SP     0x60
-#define JMP_CTX_END    0x70
-
-#define JMP_SIZE       (JMP_CTX_END >> 3)
-
-#ifndef __ASSEMBLY__
-
-#include <stdint.h>
-
-/* Jump buffer hosting the x19 - x30 and sp_el0 registers */
-struct jmpbuf {
-       uint64_t buf[JMP_SIZE];
-} __aligned(16);
-
-
-/*
- * Set a jump point, and populate the jump buffer with context information so
- * that longjmp() can jump later. The caller must adhere to the following
- * conditions:
- *
- *  - After calling this function, the stack must not be shrunk. The contents of
- *    the stack must not be changed either.
- *
- *  - If the caller were to 'return', the buffer must be considered invalid, and
- *    must not be used with longjmp().
- *
- * The caller will observe this function returning under two distinct
- * circumstances, each with a different return value:
- *
- *  - Zero, when the buffer is setup;
- *
- *  - Non-zero, when a call to longjmp() is made (presumably by one of the
- *    callee functions) with the same jump buffer.
- */
-int setjmp(struct jmpbuf *buf);
-
-/*
- * Reset execution to a jump point, and restore context information according to
- * the jump buffer populated by setjmp().
- */
-void longjmp(struct jmpbuf *buf);
-
-#endif /* __ASSEMBLY__ */
-#endif /* SETJMP_H */
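
A short usage sketch of this pair (the worker function is hypothetical; note that, unlike the C standard library versions, this longjmp() carries no value argument and setjmp() simply returns non-zero on its second return):

    void do_risky_work(void);       /* hypothetical; may call longjmp() */

    static struct jmpbuf recovery;

    void run_with_recovery(void)
    {
            if (setjmp(&recovery) == 0) {
                    do_risky_work();        /* first return: buffer set up */
            } else {
                    /* Second return: resumed via longjmp(&recovery). */
            }
    }
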
diff --git a/include/lib/aarch64/smccc_helpers.h b/include/lib/aarch64/smccc_helpers.h
deleted file mode 100644 (file)
index efab18b..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SMCCC_HELPERS_H
-#define SMCCC_HELPERS_H
-
-#include <smccc.h>
-
-#ifndef __ASSEMBLY__
-#include <context.h>
-#include <stdbool.h>
-
-/* Convenience macros to return from SMC handler */
-#define SMC_RET0(_h)   {                                       \
-       return (uint64_t) (_h);                                 \
-}
-#define SMC_RET1(_h, _x0)      {                               \
-       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X0), (_x0));     \
-       SMC_RET0(_h);                                           \
-}
-#define SMC_RET2(_h, _x0, _x1) {                               \
-       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X1), (_x1));     \
-       SMC_RET1(_h, (_x0));                                    \
-}
-#define SMC_RET3(_h, _x0, _x1, _x2)    {                       \
-       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X2), (_x2));     \
-       SMC_RET2(_h, (_x0), (_x1));                             \
-}
-#define SMC_RET4(_h, _x0, _x1, _x2, _x3)       {               \
-       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X3), (_x3));     \
-       SMC_RET3(_h, (_x0), (_x1), (_x2));                      \
-}
-#define SMC_RET5(_h, _x0, _x1, _x2, _x3, _x4)  {               \
-       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X4), (_x4));     \
-       SMC_RET4(_h, (_x0), (_x1), (_x2), (_x3));               \
-}
-#define SMC_RET6(_h, _x0, _x1, _x2, _x3, _x4, _x5)     {       \
-       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X5), (_x5));     \
-       SMC_RET5(_h, (_x0), (_x1), (_x2), (_x3), (_x4));        \
-}
-#define SMC_RET7(_h, _x0, _x1, _x2, _x3, _x4, _x5, _x6)        {       \
-       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X6), (_x6));     \
-       SMC_RET6(_h, (_x0), (_x1), (_x2), (_x3), (_x4), (_x5)); \
-}
-#define SMC_RET8(_h, _x0, _x1, _x2, _x3, _x4, _x5, _x6, _x7) { \
-       write_ctx_reg((get_gpregs_ctx(_h)), (CTX_GPREG_X7), (_x7));     \
-       SMC_RET7(_h, (_x0), (_x1), (_x2), (_x3), (_x4), (_x5), (_x6));  \
-}
-
-/*
- * Convenience macros to access general purpose registers using handle provided
- * to SMC handler. These take the offset values defined in context.h
- */
-#define SMC_GET_GP(_h, _g)                                     \
-       read_ctx_reg((get_gpregs_ctx(_h)), (_g))
-#define SMC_SET_GP(_h, _g, _v)                                 \
-       write_ctx_reg((get_gpregs_ctx(_h)), (_g), (_v))
-
-/*
- * Convenience macros to access EL3 context registers using handle provided to
- * SMC handler. These take the offset values defined in context.h
- */
-#define SMC_GET_EL3(_h, _e)                                    \
-       read_ctx_reg((get_el3state_ctx(_h)), (_e))
-#define SMC_SET_EL3(_h, _e, _v)                                        \
-       write_ctx_reg((get_el3state_ctx(_h)), (_e), (_v))
-
-/*
- * Helper macro to retrieve the SMC parameters from cpu_context_t.
- */
-#define get_smc_params_from_ctx(_hdl, _x1, _x2, _x3, _x4)      \
-       do {                                                    \
-               const gp_regs_t *regs = get_gpregs_ctx(_hdl);   \
-               _x1 = read_ctx_reg(regs, CTX_GPREG_X1);         \
-               _x2 = read_ctx_reg(regs, CTX_GPREG_X2);         \
-               _x3 = read_ctx_reg(regs, CTX_GPREG_X3);         \
-               _x4 = read_ctx_reg(regs, CTX_GPREG_X4);         \
-       } while (false)
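
Taken together, a runtime service handler built on these macros looks roughly like the sketch below (the handler name and SMC_OK result are illustrative; the signature follows the usual rt_svc handler convention):

    static uintptr_t example_smc_handler(uint32_t smc_fid,
                                         u_register_t x1, u_register_t x2,
                                         u_register_t x3, u_register_t x4,
                                         void *cookie, void *handle,
                                         u_register_t flags)
    {
            u_register_t p1, p2, p3, p4;

            get_smc_params_from_ctx(handle, p1, p2, p3, p4);

            /* ... service the call using p1-p4 ... */

            SMC_RET1(handle, SMC_OK);
    }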
-
-#endif /*__ASSEMBLY__*/
-
-#endif /* SMCCC_HELPERS_H */